2024-11-20 11:19:01,843 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-20 11:19:01,860 main DEBUG Took 0.014193 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-20 11:19:01,860 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-20 11:19:01,861 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-20 11:19:01,862 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-20 11:19:01,864 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 11:19:01,873 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-20 11:19:01,888 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 11:19:01,890 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 11:19:01,891 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 11:19:01,892 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 11:19:01,892 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 11:19:01,893 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 11:19:01,894 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 11:19:01,894 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 11:19:01,895 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 11:19:01,895 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 11:19:01,896 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 11:19:01,897 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 11:19:01,898 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 11:19:01,898 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-20 11:19:01,899 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 11:19:01,899 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 11:19:01,900 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 11:19:01,900 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 11:19:01,901 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 11:19:01,901 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 11:19:01,902 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 11:19:01,902 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 11:19:01,903 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 11:19:01,903 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 11:19:01,904 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 11:19:01,904 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-20 11:19:01,906 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 11:19:01,908 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-20 11:19:01,910 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-20 11:19:01,911 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-20 11:19:01,912 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-20 11:19:01,913 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-20 11:19:01,923 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-20 11:19:01,926 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-20 11:19:01,929 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-20 11:19:01,929 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-20 11:19:01,929 main DEBUG createAppenders(={Console}) 2024-11-20 11:19:01,930 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-11-20 11:19:01,931 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-20 11:19:01,931 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-11-20 11:19:01,932 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-20 11:19:01,932 main DEBUG OutputStream closed 2024-11-20 11:19:01,933 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-20 11:19:01,933 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-20 11:19:01,933 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-11-20 11:19:02,005 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-20 11:19:02,007 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-20 11:19:02,008 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-20 11:19:02,009 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-20 11:19:02,010 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-20 11:19:02,010 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-20 11:19:02,010 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-20 11:19:02,011 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-20 11:19:02,011 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-20 11:19:02,011 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-20 11:19:02,011 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-20 11:19:02,012 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-20 11:19:02,012 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-20 11:19:02,012 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-20 11:19:02,013 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-20 11:19:02,013 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-20 11:19:02,013 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-20 11:19:02,014 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-20 11:19:02,016 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-20 11:19:02,017 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-11-20 11:19:02,017 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-20 11:19:02,018 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-11-20T11:19:02,317 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0 2024-11-20 11:19:02,320 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-20 11:19:02,321 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-20T11:19:02,330 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithBasicPolicy timeout: 13 mins 2024-11-20T11:19:02,356 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-20T11:19:02,359 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/cluster_e3ab1e07-46f1-e4fd-8280-4ad2ac7fbc56, deleteOnExit=true 2024-11-20T11:19:02,359 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-11-20T11:19:02,360 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/test.cache.data in system properties and HBase conf 2024-11-20T11:19:02,361 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/hadoop.tmp.dir in system properties and HBase conf 2024-11-20T11:19:02,361 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/hadoop.log.dir in system properties and HBase conf 2024-11-20T11:19:02,362 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-20T11:19:02,362 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-20T11:19:02,363 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-11-20T11:19:02,461 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-20T11:19:02,561 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-20T11:19:02,565 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-20T11:19:02,566 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-20T11:19:02,566 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-20T11:19:02,567 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T11:19:02,567 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-20T11:19:02,567 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-20T11:19:02,568 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T11:19:02,568 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T11:19:02,569 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-20T11:19:02,569 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/nfs.dump.dir in system properties and HBase conf 2024-11-20T11:19:02,569 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/java.io.tmpdir in system properties and HBase conf 2024-11-20T11:19:02,570 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T11:19:02,570 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-20T11:19:02,570 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-20T11:19:03,381 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-20T11:19:03,465 INFO [Time-limited test {}] log.Log(170): Logging initialized @2343ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-20T11:19:03,544 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T11:19:03,614 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T11:19:03,636 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T11:19:03,637 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T11:19:03,638 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T11:19:03,652 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T11:19:03,654 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/hadoop.log.dir/,AVAILABLE} 2024-11-20T11:19:03,656 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T11:19:03,859 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/java.io.tmpdir/jetty-localhost-35543-hadoop-hdfs-3_4_1-tests_jar-_-any-7457455254688037154/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T11:19:03,867 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:35543} 2024-11-20T11:19:03,867 INFO [Time-limited test {}] server.Server(415): Started @2745ms 2024-11-20T11:19:04,282 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T11:19:04,289 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T11:19:04,290 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T11:19:04,290 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T11:19:04,290 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T11:19:04,291 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/hadoop.log.dir/,AVAILABLE} 2024-11-20T11:19:04,292 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T11:19:04,411 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f79ec76{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/java.io.tmpdir/jetty-localhost-40723-hadoop-hdfs-3_4_1-tests_jar-_-any-14604501877587704337/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T11:19:04,412 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:40723} 2024-11-20T11:19:04,412 INFO [Time-limited test {}] server.Server(415): Started @3290ms 2024-11-20T11:19:04,472 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T11:19:04,914 WARN [Thread-71 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/cluster_e3ab1e07-46f1-e4fd-8280-4ad2ac7fbc56/dfs/data/data1/current/BP-348945642-172.17.0.2-1732101543140/current, will proceed with Du for space computation calculation, 2024-11-20T11:19:04,914 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/cluster_e3ab1e07-46f1-e4fd-8280-4ad2ac7fbc56/dfs/data/data2/current/BP-348945642-172.17.0.2-1732101543140/current, will proceed with Du for space computation calculation, 2024-11-20T11:19:04,961 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T11:19:05,015 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd9d9d9d2a2d8fa1 with lease ID 0x87bec80e5ce44536: Processing first storage report for DS-48ddca81-e90a-4a84-b681-d20df2e4d911 from datanode DatanodeRegistration(127.0.0.1:38643, datanodeUuid=204d1068-de64-4920-8a56-72bfc6d41169, infoPort=42205, infoSecurePort=0, ipcPort=42625, storageInfo=lv=-57;cid=testClusterID;nsid=183493516;c=1732101543140) 2024-11-20T11:19:05,017 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd9d9d9d2a2d8fa1 with lease ID 0x87bec80e5ce44536: from storage DS-48ddca81-e90a-4a84-b681-d20df2e4d911 node DatanodeRegistration(127.0.0.1:38643, datanodeUuid=204d1068-de64-4920-8a56-72bfc6d41169, infoPort=42205, infoSecurePort=0, ipcPort=42625, storageInfo=lv=-57;cid=testClusterID;nsid=183493516;c=1732101543140), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-20T11:19:05,017 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd9d9d9d2a2d8fa1 with lease ID 0x87bec80e5ce44536: Processing first storage report for DS-8bdae9b6-9903-4726-84e2-a195c92373f6 from datanode DatanodeRegistration(127.0.0.1:38643, datanodeUuid=204d1068-de64-4920-8a56-72bfc6d41169, infoPort=42205, infoSecurePort=0, ipcPort=42625, storageInfo=lv=-57;cid=testClusterID;nsid=183493516;c=1732101543140) 2024-11-20T11:19:05,017 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd9d9d9d2a2d8fa1 with lease ID 0x87bec80e5ce44536: from storage DS-8bdae9b6-9903-4726-84e2-a195c92373f6 node DatanodeRegistration(127.0.0.1:38643, datanodeUuid=204d1068-de64-4920-8a56-72bfc6d41169, infoPort=42205, infoSecurePort=0, ipcPort=42625, storageInfo=lv=-57;cid=testClusterID;nsid=183493516;c=1732101543140), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T11:19:05,060 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0 
2024-11-20T11:19:05,150 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/cluster_e3ab1e07-46f1-e4fd-8280-4ad2ac7fbc56/zookeeper_0, clientPort=62733, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/cluster_e3ab1e07-46f1-e4fd-8280-4ad2ac7fbc56/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/cluster_e3ab1e07-46f1-e4fd-8280-4ad2ac7fbc56/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T11:19:05,161 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=62733 2024-11-20T11:19:05,171 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T11:19:05,173 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T11:19:05,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741825_1001 (size=7) 2024-11-20T11:19:05,795 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830 with version=8 2024-11-20T11:19:05,795 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/hbase-staging 2024-11-20T11:19:05,927 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-20T11:19:06,199 INFO [Time-limited test {}] client.ConnectionUtils(129): master/ee8338ed7cc0:0 server-side Connection retries=45 2024-11-20T11:19:06,220 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T11:19:06,220 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T11:19:06,221 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T11:19:06,221 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T11:19:06,221 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T11:19:06,358 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T11:19:06,419 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-20T11:19:06,428 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-20T11:19:06,431 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T11:19:06,459 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 19831 (auto-detected) 2024-11-20T11:19:06,460 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-20T11:19:06,479 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:36055 2024-11-20T11:19:06,487 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T11:19:06,489 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T11:19:06,502 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:36055 connecting to ZooKeeper ensemble=127.0.0.1:62733 2024-11-20T11:19:06,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:360550x0, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T11:19:06,537 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36055-0x10014a7d58e0000 connected 2024-11-20T11:19:06,565 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T11:19:06,567 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T11:19:06,571 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T11:19:06,575 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36055 2024-11-20T11:19:06,576 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36055 2024-11-20T11:19:06,576 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36055 2024-11-20T11:19:06,577 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36055 2024-11-20T11:19:06,578 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36055 
2024-11-20T11:19:06,586 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830, hbase.cluster.distributed=false 2024-11-20T11:19:06,667 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/ee8338ed7cc0:0 server-side Connection retries=45 2024-11-20T11:19:06,667 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T11:19:06,667 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T11:19:06,667 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T11:19:06,668 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T11:19:06,668 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T11:19:06,670 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T11:19:06,672 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T11:19:06,673 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:35185 2024-11-20T11:19:06,675 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T11:19:06,680 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T11:19:06,682 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T11:19:06,686 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T11:19:06,691 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:35185 connecting to ZooKeeper ensemble=127.0.0.1:62733 2024-11-20T11:19:06,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:351850x0, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T11:19:06,696 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:351850x0, quorum=127.0.0.1:62733, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T11:19:06,696 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35185-0x10014a7d58e0001 connected 2024-11-20T11:19:06,697 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35185-0x10014a7d58e0001, quorum=127.0.0.1:62733, 
baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T11:19:06,698 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35185-0x10014a7d58e0001, quorum=127.0.0.1:62733, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T11:19:06,700 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35185 2024-11-20T11:19:06,700 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35185 2024-11-20T11:19:06,701 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35185 2024-11-20T11:19:06,702 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35185 2024-11-20T11:19:06,703 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35185 2024-11-20T11:19:06,705 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/ee8338ed7cc0,36055,1732101545920 2024-11-20T11:19:06,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T11:19:06,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35185-0x10014a7d58e0001, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T11:19:06,714 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/ee8338ed7cc0,36055,1732101545920 2024-11-20T11:19:06,720 DEBUG [M:0;ee8338ed7cc0:36055 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;ee8338ed7cc0:36055 2024-11-20T11:19:06,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T11:19:06,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35185-0x10014a7d58e0001, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T11:19:06,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T11:19:06,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35185-0x10014a7d58e0001, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T11:19:06,737 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T11:19:06,738 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(111): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T11:19:06,738 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/ee8338ed7cc0,36055,1732101545920 from backup master directory 2024-11-20T11:19:06,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/ee8338ed7cc0,36055,1732101545920 2024-11-20T11:19:06,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35185-0x10014a7d58e0001, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T11:19:06,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T11:19:06,741 WARN [master/ee8338ed7cc0:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T11:19:06,742 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=ee8338ed7cc0,36055,1732101545920 2024-11-20T11:19:06,744 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-20T11:19:06,746 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-20T11:19:06,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741826_1002 (size=42) 2024-11-20T11:19:07,214 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/hbase.id with ID: 3396b959-43f6-48cf-aa69-a923d7f0532f 2024-11-20T11:19:07,255 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T11:19:07,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35185-0x10014a7d58e0001, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T11:19:07,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T11:19:07,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741827_1003 (size=196) 2024-11-20T11:19:07,718 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME 
=> 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T11:19:07,721 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T11:19:07,739 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:07,743 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-20T11:19:07,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741828_1004 (size=1189) 2024-11-20T11:19:07,794 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/MasterData/data/master/store 2024-11-20T11:19:07,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741829_1005 (size=34) 2024-11-20T11:19:08,217 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-11-20T11:19:08,217 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T11:19:08,218 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T11:19:08,218 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T11:19:08,219 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T11:19:08,219 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T11:19:08,219 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T11:19:08,219 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T11:19:08,219 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-20T11:19:08,222 WARN [master/ee8338ed7cc0:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/MasterData/data/master/store/.initializing 2024-11-20T11:19:08,222 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/MasterData/WALs/ee8338ed7cc0,36055,1732101545920 2024-11-20T11:19:08,229 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-20T11:19:08,240 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ee8338ed7cc0%2C36055%2C1732101545920, suffix=, logDir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/MasterData/WALs/ee8338ed7cc0,36055,1732101545920, archiveDir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/MasterData/oldWALs, maxLogs=10 2024-11-20T11:19:08,263 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/MasterData/WALs/ee8338ed7cc0,36055,1732101545920/ee8338ed7cc0%2C36055%2C1732101545920.1732101548244, exclude list is [], retry=0 2024-11-20T11:19:08,279 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38643,DS-48ddca81-e90a-4a84-b681-d20df2e4d911,DISK] 2024-11-20T11:19:08,283 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-11-20T11:19:08,320 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/MasterData/WALs/ee8338ed7cc0,36055,1732101545920/ee8338ed7cc0%2C36055%2C1732101545920.1732101548244 2024-11-20T11:19:08,321 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42205:42205)] 2024-11-20T11:19:08,321 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T11:19:08,322 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T11:19:08,325 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T11:19:08,326 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T11:19:08,364 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T11:19:08,389 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T11:19:08,393 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:08,396 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T11:19:08,397 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T11:19:08,400 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T11:19:08,400 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:08,402 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:19:08,402 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T11:19:08,405 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T11:19:08,405 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:08,406 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:19:08,406 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T11:19:08,409 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T11:19:08,409 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:08,410 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:19:08,413 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T11:19:08,415 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T11:19:08,423 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T11:19:08,427 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T11:19:08,432 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T11:19:08,433 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74948014, jitterRate=0.11681243777275085}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T11:19:08,437 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-20T11:19:08,438 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T11:19:08,467 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58a637bd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:19:08,501 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
2024-11-20T11:19:08,513 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T11:19:08,514 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T11:19:08,516 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T11:19:08,517 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-11-20T11:19:08,522 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 4 msec 2024-11-20T11:19:08,522 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T11:19:08,547 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T11:19:08,558 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T11:19:08,560 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-11-20T11:19:08,562 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T11:19:08,563 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T11:19:08,565 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-11-20T11:19:08,567 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T11:19:08,570 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T11:19:08,571 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-11-20T11:19:08,572 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T11:19:08,574 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T11:19:08,583 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T11:19:08,585 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T11:19:08,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T11:19:08,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35185-0x10014a7d58e0001, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T11:19:08,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T11:19:08,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35185-0x10014a7d58e0001, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T11:19:08,589 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=ee8338ed7cc0,36055,1732101545920, sessionid=0x10014a7d58e0000, setting cluster-up flag (Was=false) 2024-11-20T11:19:08,601 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T11:19:08,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35185-0x10014a7d58e0001, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T11:19:08,607 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T11:19:08,608 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ee8338ed7cc0,36055,1732101545920 2024-11-20T11:19:08,613 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T11:19:08,613 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35185-0x10014a7d58e0001, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T11:19:08,618 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T11:19:08,620 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ee8338ed7cc0,36055,1732101545920 2024-11-20T11:19:08,698 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure 
table=hbase:meta 2024-11-20T11:19:08,704 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-20T11:19:08,706 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-20T11:19:08,712 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: ee8338ed7cc0,36055,1732101545920 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T11:19:08,715 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/ee8338ed7cc0:0, corePoolSize=5, maxPoolSize=5 2024-11-20T11:19:08,716 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/ee8338ed7cc0:0, corePoolSize=5, maxPoolSize=5 2024-11-20T11:19:08,716 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/ee8338ed7cc0:0, corePoolSize=5, maxPoolSize=5 2024-11-20T11:19:08,716 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/ee8338ed7cc0:0, corePoolSize=5, maxPoolSize=5 2024-11-20T11:19:08,716 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/ee8338ed7cc0:0, corePoolSize=10, maxPoolSize=10 2024-11-20T11:19:08,716 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/ee8338ed7cc0:0, corePoolSize=1, maxPoolSize=1 2024-11-20T11:19:08,716 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/ee8338ed7cc0:0, corePoolSize=2, maxPoolSize=2 2024-11-20T11:19:08,717 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/ee8338ed7cc0:0, corePoolSize=1, maxPoolSize=1 2024-11-20T11:19:08,718 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732101578718 2024-11-20T11:19:08,719 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T11:19:08,719 DEBUG [RS:0;ee8338ed7cc0:35185 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;ee8338ed7cc0:35185 2024-11-20T11:19:08,720 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T11:19:08,721 INFO [RS:0;ee8338ed7cc0:35185 {}] regionserver.HRegionServer(1008): ClusterId : 3396b959-43f6-48cf-aa69-a923d7f0532f 2024-11-20T11:19:08,721 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-11-20T11:19:08,722 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-11-20T11:19:08,724 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T11:19:08,724 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T11:19:08,724 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T11:19:08,724 DEBUG [RS:0;ee8338ed7cc0:35185 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T11:19:08,725 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T11:19:08,725 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T11:19:08,727 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T11:19:08,727 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:08,727 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T11:19:08,728 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T11:19:08,728 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T11:19:08,730 DEBUG [RS:0;ee8338ed7cc0:35185 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T11:19:08,730 DEBUG [RS:0;ee8338ed7cc0:35185 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T11:19:08,730 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T11:19:08,730 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T11:19:08,732 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/ee8338ed7cc0:0:becomeActiveMaster-HFileCleaner.large.0-1732101548732,5,FailOnTimeoutGroup] 2024-11-20T11:19:08,732 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/ee8338ed7cc0:0:becomeActiveMaster-HFileCleaner.small.0-1732101548732,5,FailOnTimeoutGroup] 2024-11-20T11:19:08,732 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T11:19:08,732 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T11:19:08,734 DEBUG [RS:0;ee8338ed7cc0:35185 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T11:19:08,734 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T11:19:08,734 DEBUG [RS:0;ee8338ed7cc0:35185 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d2056ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:19:08,734 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-20T11:19:08,740 DEBUG [RS:0;ee8338ed7cc0:35185 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@416e45f1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ee8338ed7cc0/172.17.0.2:0 2024-11-20T11:19:08,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741831_1007 (size=1039) 2024-11-20T11:19:08,745 INFO [RS:0;ee8338ed7cc0:35185 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-20T11:19:08,745 INFO [RS:0;ee8338ed7cc0:35185 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-20T11:19:08,745 DEBUG [RS:0;ee8338ed7cc0:35185 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-11-20T11:19:08,747 INFO [RS:0;ee8338ed7cc0:35185 {}] regionserver.HRegionServer(3073): reportForDuty to master=ee8338ed7cc0,36055,1732101545920 with isa=ee8338ed7cc0/172.17.0.2:35185, startcode=1732101546666
2024-11-20T11:19:08,762 DEBUG [RS:0;ee8338ed7cc0:35185 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-20T11:19:08,796 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33565, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService
2024-11-20T11:19:08,801 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36055 {}] master.ServerManager(332): Checking decommissioned status of RegionServer ee8338ed7cc0,35185,1732101546666
2024-11-20T11:19:08,803 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36055 {}] master.ServerManager(486): Registering regionserver=ee8338ed7cc0,35185,1732101546666
2024-11-20T11:19:08,818 DEBUG [RS:0;ee8338ed7cc0:35185 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830
2024-11-20T11:19:08,818 DEBUG [RS:0;ee8338ed7cc0:35185 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:43109
2024-11-20T11:19:08,818 DEBUG [RS:0;ee8338ed7cc0:35185 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1
2024-11-20T11:19:08,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-20T11:19:08,824 DEBUG [RS:0;ee8338ed7cc0:35185 {}] zookeeper.ZKUtil(111): regionserver:35185-0x10014a7d58e0001, quorum=127.0.0.1:62733, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ee8338ed7cc0,35185,1732101546666
2024-11-20T11:19:08,824 WARN [RS:0;ee8338ed7cc0:35185 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-20T11:19:08,824 INFO [RS:0;ee8338ed7cc0:35185 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-11-20T11:19:08,824 DEBUG [RS:0;ee8338ed7cc0:35185 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/WALs/ee8338ed7cc0,35185,1732101546666
2024-11-20T11:19:08,826 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ee8338ed7cc0,35185,1732101546666]
2024-11-20T11:19:08,838 DEBUG [RS:0;ee8338ed7cc0:35185 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds
2024-11-20T11:19:08,850 INFO [RS:0;ee8338ed7cc0:35185 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-11-20T11:19:08,862 INFO [RS:0;ee8338ed7cc0:35185 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-11-20T11:19:08,865 INFO [RS:0;ee8338ed7cc0:35185 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-20T11:19:08,865 INFO [RS:0;ee8338ed7cc0:35185 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-20T11:19:08,866 INFO [RS:0;ee8338ed7cc0:35185 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S
2024-11-20T11:19:08,873 INFO [RS:0;ee8338ed7cc0:35185 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-11-20T11:19:08,874 DEBUG [RS:0;ee8338ed7cc0:35185 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ee8338ed7cc0:0, corePoolSize=1, maxPoolSize=1
2024-11-20T11:19:08,874 DEBUG [RS:0;ee8338ed7cc0:35185 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ee8338ed7cc0:0, corePoolSize=1, maxPoolSize=1
2024-11-20T11:19:08,874 DEBUG [RS:0;ee8338ed7cc0:35185 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ee8338ed7cc0:0, corePoolSize=1, maxPoolSize=1
2024-11-20T11:19:08,874 DEBUG [RS:0;ee8338ed7cc0:35185 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0, corePoolSize=1, maxPoolSize=1
2024-11-20T11:19:08,874 DEBUG [RS:0;ee8338ed7cc0:35185 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ee8338ed7cc0:0, corePoolSize=1, maxPoolSize=1
2024-11-20T11:19:08,875 DEBUG [RS:0;ee8338ed7cc0:35185 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ee8338ed7cc0:0, corePoolSize=2, maxPoolSize=2
2024-11-20T11:19:08,875 DEBUG [RS:0;ee8338ed7cc0:35185 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ee8338ed7cc0:0, corePoolSize=1, maxPoolSize=1
2024-11-20T11:19:08,875 DEBUG [RS:0;ee8338ed7cc0:35185 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ee8338ed7cc0:0, corePoolSize=1, maxPoolSize=1
2024-11-20T11:19:08,875 DEBUG [RS:0;ee8338ed7cc0:35185 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ee8338ed7cc0:0, corePoolSize=1, maxPoolSize=1
2024-11-20T11:19:08,875 DEBUG [RS:0;ee8338ed7cc0:35185 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ee8338ed7cc0:0, corePoolSize=1, maxPoolSize=1
2024-11-20T11:19:08,875 DEBUG [RS:0;ee8338ed7cc0:35185 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ee8338ed7cc0:0, corePoolSize=1, maxPoolSize=1
2024-11-20T11:19:08,876 DEBUG [RS:0;ee8338ed7cc0:35185 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ee8338ed7cc0:0, corePoolSize=3, maxPoolSize=3
2024-11-20T11:19:08,876 DEBUG [RS:0;ee8338ed7cc0:35185 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0, corePoolSize=3, maxPoolSize=3
2024-11-20T11:19:08,877 INFO [RS:0;ee8338ed7cc0:35185 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-11-20T11:19:08,878 INFO [RS:0;ee8338ed7cc0:35185 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-11-20T11:19:08,878 INFO [RS:0;ee8338ed7cc0:35185 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-11-20T11:19:08,878 INFO [RS:0;ee8338ed7cc0:35185 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-11-20T11:19:08,878 INFO [RS:0;ee8338ed7cc0:35185 {}] hbase.ChoreService(168): Chore ScheduledChore name=ee8338ed7cc0,35185,1732101546666-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-20T11:19:08,898 INFO [RS:0;ee8338ed7cc0:35185 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-11-20T11:19:08,900 INFO [RS:0;ee8338ed7cc0:35185 {}] hbase.ChoreService(168): Chore ScheduledChore name=ee8338ed7cc0,35185,1732101546666-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-20T11:19:08,920 INFO [RS:0;ee8338ed7cc0:35185 {}] regionserver.Replication(204): ee8338ed7cc0,35185,1732101546666 started
2024-11-20T11:19:08,920 INFO [RS:0;ee8338ed7cc0:35185 {}] regionserver.HRegionServer(1767): Serving as ee8338ed7cc0,35185,1732101546666, RpcServer on ee8338ed7cc0/172.17.0.2:35185, sessionid=0x10014a7d58e0001
2024-11-20T11:19:08,921 DEBUG [RS:0;ee8338ed7cc0:35185 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-11-20T11:19:08,921 DEBUG [RS:0;ee8338ed7cc0:35185 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ee8338ed7cc0,35185,1732101546666
2024-11-20T11:19:08,921 DEBUG [RS:0;ee8338ed7cc0:35185 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ee8338ed7cc0,35185,1732101546666'
2024-11-20T11:19:08,921 DEBUG [RS:0;ee8338ed7cc0:35185 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-11-20T11:19:08,922 DEBUG [RS:0;ee8338ed7cc0:35185 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-11-20T11:19:08,922 DEBUG [RS:0;ee8338ed7cc0:35185 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-11-20T11:19:08,923 DEBUG [RS:0;ee8338ed7cc0:35185 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-11-20T11:19:08,923 DEBUG [RS:0;ee8338ed7cc0:35185 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ee8338ed7cc0,35185,1732101546666
2024-11-20T11:19:08,923 DEBUG [RS:0;ee8338ed7cc0:35185 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ee8338ed7cc0,35185,1732101546666'
2024-11-20T11:19:08,923 DEBUG [RS:0;ee8338ed7cc0:35185 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-11-20T11:19:08,923 DEBUG [RS:0;ee8338ed7cc0:35185 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-11-20T11:19:08,924 DEBUG [RS:0;ee8338ed7cc0:35185 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-11-20T11:19:08,924 INFO [RS:0;ee8338ed7cc0:35185 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-11-20T11:19:08,924 INFO [RS:0;ee8338ed7cc0:35185 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-11-20T11:19:09,030 INFO [RS:0;ee8338ed7cc0:35185 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-20T11:19:09,033 INFO [RS:0;ee8338ed7cc0:35185 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ee8338ed7cc0%2C35185%2C1732101546666, suffix=, logDir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/WALs/ee8338ed7cc0,35185,1732101546666, archiveDir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/oldWALs, maxLogs=32 2024-11-20T11:19:09,050 DEBUG [RS:0;ee8338ed7cc0:35185 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/WALs/ee8338ed7cc0,35185,1732101546666/ee8338ed7cc0%2C35185%2C1732101546666.1732101549035, exclude list is [], retry=0 2024-11-20T11:19:09,055 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38643,DS-48ddca81-e90a-4a84-b681-d20df2e4d911,DISK] 2024-11-20T11:19:09,058 INFO [RS:0;ee8338ed7cc0:35185 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/WALs/ee8338ed7cc0,35185,1732101546666/ee8338ed7cc0%2C35185%2C1732101546666.1732101549035 2024-11-20T11:19:09,059 DEBUG [RS:0;ee8338ed7cc0:35185 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42205:42205)] 2024-11-20T11:19:09,145 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-11-20T11:19:09,145 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830 2024-11-20T11:19:09,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741833_1009 (size=32) 2024-11-20T11:19:09,558 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T11:19:09,561 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T11:19:09,563 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T11:19:09,564 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:09,564 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T11:19:09,565 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T11:19:09,567 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T11:19:09,567 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:09,568 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T11:19:09,568 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T11:19:09,570 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T11:19:09,570 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:09,571 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T11:19:09,572 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/meta/1588230740 2024-11-20T11:19:09,573 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/meta/1588230740 2024-11-20T11:19:09,576 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-20T11:19:09,578 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-20T11:19:09,581 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T11:19:09,582 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58912029, jitterRate=-0.12214235961437225}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T11:19:09,585 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-20T11:19:09,585 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-20T11:19:09,585 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-20T11:19:09,585 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-20T11:19:09,585 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T11:19:09,585 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T11:19:09,586 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-20T11:19:09,586 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-20T11:19:09,589 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-11-20T11:19:09,589 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-11-20T11:19:09,595 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T11:19:09,603 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T11:19:09,605 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T11:19:09,757 DEBUG [ee8338ed7cc0:36055 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T11:19:09,762 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:09,767 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ee8338ed7cc0,35185,1732101546666, state=OPENING 2024-11-20T11:19:09,772 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T11:19:09,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T11:19:09,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35185-0x10014a7d58e0001, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T11:19:09,775 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T11:19:09,775 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T11:19:09,777 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=ee8338ed7cc0,35185,1732101546666}] 2024-11-20T11:19:09,951 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:09,953 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T11:19:09,957 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44364, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T11:19:09,967 INFO [RS_OPEN_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-20T11:19:09,968 INFO [RS_OPEN_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-20T11:19:09,968 INFO [RS_OPEN_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-20T11:19:09,972 INFO [RS_OPEN_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ee8338ed7cc0%2C35185%2C1732101546666.meta, suffix=.meta, logDir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/WALs/ee8338ed7cc0,35185,1732101546666, archiveDir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/oldWALs, maxLogs=32 2024-11-20T11:19:09,988 DEBUG [RS_OPEN_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/WALs/ee8338ed7cc0,35185,1732101546666/ee8338ed7cc0%2C35185%2C1732101546666.meta.1732101549974.meta, exclude list is [], retry=0 2024-11-20T11:19:09,993 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38643,DS-48ddca81-e90a-4a84-b681-d20df2e4d911,DISK] 2024-11-20T11:19:09,996 INFO [RS_OPEN_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/WALs/ee8338ed7cc0,35185,1732101546666/ee8338ed7cc0%2C35185%2C1732101546666.meta.1732101549974.meta 2024-11-20T11:19:09,997 DEBUG [RS_OPEN_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer 
with pipeline: [(127.0.0.1/127.0.0.1:42205:42205)] 2024-11-20T11:19:09,997 DEBUG [RS_OPEN_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T11:19:09,999 DEBUG [RS_OPEN_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T11:19:10,061 DEBUG [RS_OPEN_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T11:19:10,066 INFO [RS_OPEN_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-20T11:19:10,071 DEBUG [RS_OPEN_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T11:19:10,071 DEBUG [RS_OPEN_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T11:19:10,071 DEBUG [RS_OPEN_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-20T11:19:10,072 DEBUG [RS_OPEN_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-20T11:19:10,075 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T11:19:10,077 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T11:19:10,077 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:10,078 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T11:19:10,078 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T11:19:10,079 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T11:19:10,079 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:10,080 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T11:19:10,080 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T11:19:10,082 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T11:19:10,082 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:10,083 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T11:19:10,084 DEBUG [RS_OPEN_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/meta/1588230740 2024-11-20T11:19:10,087 DEBUG [RS_OPEN_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/meta/1588230740 2024-11-20T11:19:10,089 DEBUG [RS_OPEN_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T11:19:10,092 DEBUG [RS_OPEN_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-20T11:19:10,094 INFO [RS_OPEN_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69436774, jitterRate=0.0346885621547699}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T11:19:10,095 DEBUG [RS_OPEN_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-20T11:19:10,102 INFO [RS_OPEN_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732101549946 2024-11-20T11:19:10,113 DEBUG [RS_OPEN_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T11:19:10,114 INFO [RS_OPEN_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-20T11:19:10,115 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:10,117 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ee8338ed7cc0,35185,1732101546666, state=OPEN 2024-11-20T11:19:10,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T11:19:10,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35185-0x10014a7d58e0001, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T11:19:10,121 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T11:19:10,121 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T11:19:10,125 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T11:19:10,125 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=ee8338ed7cc0,35185,1732101546666 in 344 msec 2024-11-20T11:19:10,131 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T11:19:10,131 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure 
table=hbase:meta, region=1588230740, ASSIGN in 532 msec 2024-11-20T11:19:10,136 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.4820 sec 2024-11-20T11:19:10,136 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732101550136, completionTime=-1 2024-11-20T11:19:10,137 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T11:19:10,137 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-20T11:19:10,175 DEBUG [hconnection-0x6a7920e2-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:19:10,178 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44374, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:19:10,188 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-11-20T11:19:10,188 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732101610188 2024-11-20T11:19:10,189 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732101670189 2024-11-20T11:19:10,189 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 51 msec 2024-11-20T11:19:10,210 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ee8338ed7cc0,36055,1732101545920-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T11:19:10,210 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ee8338ed7cc0,36055,1732101545920-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T11:19:10,210 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ee8338ed7cc0,36055,1732101545920-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T11:19:10,212 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-ee8338ed7cc0:36055, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T11:19:10,212 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T11:19:10,217 DEBUG [master/ee8338ed7cc0:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-20T11:19:10,220 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-11-20T11:19:10,221 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T11:19:10,228 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-20T11:19:10,231 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T11:19:10,232 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:10,234 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T11:19:10,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741835_1011 (size=358) 2024-11-20T11:19:10,649 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 2931c42a5e0431c7e1d9a63f9b78ad4e, NAME => 'hbase:namespace,,1732101550221.2931c42a5e0431c7e1d9a63f9b78ad4e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830 2024-11-20T11:19:10,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741836_1012 (size=42) 2024-11-20T11:19:11,059 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732101550221.2931c42a5e0431c7e1d9a63f9b78ad4e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T11:19:11,060 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 2931c42a5e0431c7e1d9a63f9b78ad4e, disabling compactions & flushes 2024-11-20T11:19:11,060 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732101550221.2931c42a5e0431c7e1d9a63f9b78ad4e. 2024-11-20T11:19:11,060 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732101550221.2931c42a5e0431c7e1d9a63f9b78ad4e. 2024-11-20T11:19:11,060 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732101550221.2931c42a5e0431c7e1d9a63f9b78ad4e. 
after waiting 0 ms 2024-11-20T11:19:11,060 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732101550221.2931c42a5e0431c7e1d9a63f9b78ad4e. 2024-11-20T11:19:11,060 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1732101550221.2931c42a5e0431c7e1d9a63f9b78ad4e. 2024-11-20T11:19:11,060 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 2931c42a5e0431c7e1d9a63f9b78ad4e: 2024-11-20T11:19:11,063 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T11:19:11,069 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1732101550221.2931c42a5e0431c7e1d9a63f9b78ad4e.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1732101551064"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732101551064"}]},"ts":"1732101551064"} 2024-11-20T11:19:11,094 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T11:19:11,096 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T11:19:11,099 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732101551096"}]},"ts":"1732101551096"} 2024-11-20T11:19:11,103 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-20T11:19:11,109 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=2931c42a5e0431c7e1d9a63f9b78ad4e, ASSIGN}] 2024-11-20T11:19:11,111 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=2931c42a5e0431c7e1d9a63f9b78ad4e, ASSIGN 2024-11-20T11:19:11,113 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=2931c42a5e0431c7e1d9a63f9b78ad4e, ASSIGN; state=OFFLINE, location=ee8338ed7cc0,35185,1732101546666; forceNewPlan=false, retain=false 2024-11-20T11:19:11,264 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=2931c42a5e0431c7e1d9a63f9b78ad4e, regionState=OPENING, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:11,268 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 2931c42a5e0431c7e1d9a63f9b78ad4e, server=ee8338ed7cc0,35185,1732101546666}] 2024-11-20T11:19:11,422 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:11,430 INFO [RS_OPEN_PRIORITY_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1732101550221.2931c42a5e0431c7e1d9a63f9b78ad4e. 2024-11-20T11:19:11,431 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 2931c42a5e0431c7e1d9a63f9b78ad4e, NAME => 'hbase:namespace,,1732101550221.2931c42a5e0431c7e1d9a63f9b78ad4e.', STARTKEY => '', ENDKEY => ''} 2024-11-20T11:19:11,431 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 2931c42a5e0431c7e1d9a63f9b78ad4e 2024-11-20T11:19:11,431 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732101550221.2931c42a5e0431c7e1d9a63f9b78ad4e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T11:19:11,432 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 2931c42a5e0431c7e1d9a63f9b78ad4e 2024-11-20T11:19:11,432 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 2931c42a5e0431c7e1d9a63f9b78ad4e 2024-11-20T11:19:11,435 INFO [StoreOpener-2931c42a5e0431c7e1d9a63f9b78ad4e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 2931c42a5e0431c7e1d9a63f9b78ad4e 2024-11-20T11:19:11,437 INFO [StoreOpener-2931c42a5e0431c7e1d9a63f9b78ad4e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2931c42a5e0431c7e1d9a63f9b78ad4e columnFamilyName info 2024-11-20T11:19:11,438 DEBUG [StoreOpener-2931c42a5e0431c7e1d9a63f9b78ad4e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:11,439 INFO [StoreOpener-2931c42a5e0431c7e1d9a63f9b78ad4e-1 {}] regionserver.HStore(327): Store=2931c42a5e0431c7e1d9a63f9b78ad4e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:19:11,440 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/namespace/2931c42a5e0431c7e1d9a63f9b78ad4e 2024-11-20T11:19:11,441 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/namespace/2931c42a5e0431c7e1d9a63f9b78ad4e 2024-11-20T11:19:11,446 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 2931c42a5e0431c7e1d9a63f9b78ad4e 2024-11-20T11:19:11,450 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/namespace/2931c42a5e0431c7e1d9a63f9b78ad4e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T11:19:11,452 INFO [RS_OPEN_PRIORITY_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 2931c42a5e0431c7e1d9a63f9b78ad4e; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66571334, jitterRate=-0.00800982117652893}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T11:19:11,454 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 2931c42a5e0431c7e1d9a63f9b78ad4e: 2024-11-20T11:19:11,456 INFO [RS_OPEN_PRIORITY_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1732101550221.2931c42a5e0431c7e1d9a63f9b78ad4e., pid=6, masterSystemTime=1732101551422 2024-11-20T11:19:11,460 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1732101550221.2931c42a5e0431c7e1d9a63f9b78ad4e. 2024-11-20T11:19:11,460 INFO [RS_OPEN_PRIORITY_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1732101550221.2931c42a5e0431c7e1d9a63f9b78ad4e. 
2024-11-20T11:19:11,461 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=2931c42a5e0431c7e1d9a63f9b78ad4e, regionState=OPEN, openSeqNum=2, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:11,468 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-20T11:19:11,469 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 2931c42a5e0431c7e1d9a63f9b78ad4e, server=ee8338ed7cc0,35185,1732101546666 in 197 msec 2024-11-20T11:19:11,472 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-20T11:19:11,472 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=2931c42a5e0431c7e1d9a63f9b78ad4e, ASSIGN in 359 msec 2024-11-20T11:19:11,473 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T11:19:11,473 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732101551473"}]},"ts":"1732101551473"} 2024-11-20T11:19:11,476 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-20T11:19:11,480 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T11:19:11,482 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.2580 sec 2024-11-20T11:19:11,532 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-20T11:19:11,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-20T11:19:11,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35185-0x10014a7d58e0001, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T11:19:11,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T11:19:11,563 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-20T11:19:11,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-20T11:19:11,583 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 23 msec 2024-11-20T11:19:11,586 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-11-20T11:19:11,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-20T11:19:11,601 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 14 msec 2024-11-20T11:19:11,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-20T11:19:11,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-20T11:19:11,615 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 4.873sec 2024-11-20T11:19:11,616 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T11:19:11,618 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T11:19:11,619 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T11:19:11,619 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-20T11:19:11,619 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T11:19:11,620 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ee8338ed7cc0,36055,1732101545920-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T11:19:11,621 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ee8338ed7cc0,36055,1732101545920-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T11:19:11,627 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-20T11:19:11,628 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T11:19:11,628 INFO [master/ee8338ed7cc0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ee8338ed7cc0,36055,1732101545920-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
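The CreateNamespaceProcedure entries just above (pid=7 for 'default', pid=8 for 'hbase') are fired internally by the master as it finishes initialization. For orientation only, here is a minimal client-side sketch of the equivalent operation, assuming the standard Admin API; the namespace name demo_ns is hypothetical and does not appear in this log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CreateNamespaceSketch {
    public static void main(String[] args) throws Exception {
        // Assumes an hbase-site.xml on the classpath that points at the cluster
        // (in this log, a mini cluster whose ZooKeeper quorum is 127.0.0.1:62733).
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Client-side counterpart of CreateNamespaceProcedure; 'demo_ns' is illustrative.
            admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
        }
    }
}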
2024-11-20T11:19:11,724 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7e541e88 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e83c466 2024-11-20T11:19:11,724 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-11-20T11:19:11,731 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@305a704f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:19:11,735 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-20T11:19:11,736 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-20T11:19:11,747 DEBUG [hconnection-0x68773b0e-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:19:11,755 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44378, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:19:11,764 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=ee8338ed7cc0,36055,1732101545920 2024-11-20T11:19:11,780 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=174, ProcessCount=11, AvailableMemoryMB=6574 2024-11-20T11:19:11,790 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T11:19:11,793 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38942, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T11:19:11,799 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
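The TableDescriptorChecker WARN at 11:19:11,799 fires because the flush size in effect for this table (131072 bytes, i.e. 128 KB, via the table descriptor or "hbase.hregion.memstore.flush.size") is far below the 128 MB default, presumably so the ACID test forces very frequent flushes. Below is a minimal sketch of how such a per-table flush size would be expressed with TableDescriptorBuilder; this is illustrative code, not the test's actual source.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushSizeSketch {
    // Builds a descriptor whose per-table MEMSTORE_FLUSHSIZE is 128 KB, the
    // value (131072) that TableDescriptorChecker warns about in the log above.
    static TableDescriptor smallFlushSizeDescriptor() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setMemStoreFlushSize(128 * 1024)  // 131072 bytes -> very frequent flushing
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
                .build();
    }
}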
2024-11-20T11:19:11,803 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T11:19:11,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T11:19:11,807 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T11:19:11,808 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:11,809 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T11:19:11,810 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-11-20T11:19:11,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741837_1013 (size=960) 2024-11-20T11:19:11,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T11:19:11,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T11:19:12,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T11:19:12,248 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS 
=> 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830 2024-11-20T11:19:12,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741838_1014 (size=53) 2024-11-20T11:19:12,258 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T11:19:12,259 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 96d866d8db5bf8a73bb64ed0351e8f75, disabling compactions & flushes 2024-11-20T11:19:12,259 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:12,259 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:12,259 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. after waiting 0 ms 2024-11-20T11:19:12,259 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:12,259 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:12,259 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:12,261 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T11:19:12,261 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732101552261"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732101552261"}]},"ts":"1732101552261"} 2024-11-20T11:19:12,264 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
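The create request at 11:19:11,803 spells out the TestAcidGuarantees schema: column families A, B and C with VERSIONS => '1', plus the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC', which is why the stores later open with memstore type=CompactingMemStore. A hedged sketch of an equivalent createTable call through the Admin API follows; the helper name and the way the admin handle is obtained are assumptions, not the test's actual code.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestAcidGuaranteesSketch {
    // Rough client-side equivalent of the 'create TestAcidGuarantees' request:
    // three families with a single version each, and the table attribute that
    // selects the BASIC in-memory compaction (CompactingMemStore) policy.
    static void createTable(Admin admin) throws IOException {
        TableDescriptorBuilder builder =
                TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                        .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
        for (String family : new String[] {"A", "B", "C"}) {
            builder.setColumnFamily(
                    ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                            .setMaxVersions(1)
                            .build());
        }
        admin.createTable(builder.build());
    }
}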
2024-11-20T11:19:12,266 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T11:19:12,266 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732101552266"}]},"ts":"1732101552266"} 2024-11-20T11:19:12,268 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T11:19:12,273 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=96d866d8db5bf8a73bb64ed0351e8f75, ASSIGN}] 2024-11-20T11:19:12,275 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=96d866d8db5bf8a73bb64ed0351e8f75, ASSIGN 2024-11-20T11:19:12,276 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=96d866d8db5bf8a73bb64ed0351e8f75, ASSIGN; state=OFFLINE, location=ee8338ed7cc0,35185,1732101546666; forceNewPlan=false, retain=false 2024-11-20T11:19:12,427 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=96d866d8db5bf8a73bb64ed0351e8f75, regionState=OPENING, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:12,430 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666}] 2024-11-20T11:19:12,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T11:19:12,584 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:12,590 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
2024-11-20T11:19:12,591 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} 2024-11-20T11:19:12,591 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:12,592 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T11:19:12,592 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:12,592 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:12,594 INFO [StoreOpener-96d866d8db5bf8a73bb64ed0351e8f75-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:12,597 INFO [StoreOpener-96d866d8db5bf8a73bb64ed0351e8f75-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T11:19:12,598 INFO [StoreOpener-96d866d8db5bf8a73bb64ed0351e8f75-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 96d866d8db5bf8a73bb64ed0351e8f75 columnFamilyName A 2024-11-20T11:19:12,598 DEBUG [StoreOpener-96d866d8db5bf8a73bb64ed0351e8f75-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:12,599 INFO [StoreOpener-96d866d8db5bf8a73bb64ed0351e8f75-1 {}] regionserver.HStore(327): Store=96d866d8db5bf8a73bb64ed0351e8f75/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:19:12,599 INFO [StoreOpener-96d866d8db5bf8a73bb64ed0351e8f75-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:12,601 INFO [StoreOpener-96d866d8db5bf8a73bb64ed0351e8f75-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T11:19:12,601 INFO [StoreOpener-96d866d8db5bf8a73bb64ed0351e8f75-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 96d866d8db5bf8a73bb64ed0351e8f75 columnFamilyName B 2024-11-20T11:19:12,601 DEBUG [StoreOpener-96d866d8db5bf8a73bb64ed0351e8f75-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:12,602 INFO [StoreOpener-96d866d8db5bf8a73bb64ed0351e8f75-1 {}] regionserver.HStore(327): Store=96d866d8db5bf8a73bb64ed0351e8f75/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:19:12,602 INFO [StoreOpener-96d866d8db5bf8a73bb64ed0351e8f75-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:12,604 INFO [StoreOpener-96d866d8db5bf8a73bb64ed0351e8f75-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T11:19:12,604 INFO [StoreOpener-96d866d8db5bf8a73bb64ed0351e8f75-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 96d866d8db5bf8a73bb64ed0351e8f75 columnFamilyName C 2024-11-20T11:19:12,604 DEBUG [StoreOpener-96d866d8db5bf8a73bb64ed0351e8f75-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:12,605 INFO [StoreOpener-96d866d8db5bf8a73bb64ed0351e8f75-1 {}] regionserver.HStore(327): Store=96d866d8db5bf8a73bb64ed0351e8f75/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:19:12,606 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:12,607 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:12,608 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:12,610 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T11:19:12,613 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:12,616 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T11:19:12,617 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 96d866d8db5bf8a73bb64ed0351e8f75; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59726096, jitterRate=-0.11001181602478027}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T11:19:12,618 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:12,619 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75., pid=11, masterSystemTime=1732101552584 2024-11-20T11:19:12,622 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:12,622 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
2024-11-20T11:19:12,623 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=96d866d8db5bf8a73bb64ed0351e8f75, regionState=OPEN, openSeqNum=2, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:12,629 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-11-20T11:19:12,629 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 in 196 msec 2024-11-20T11:19:12,632 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-11-20T11:19:12,632 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=96d866d8db5bf8a73bb64ed0351e8f75, ASSIGN in 356 msec 2024-11-20T11:19:12,633 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T11:19:12,634 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732101552633"}]},"ts":"1732101552633"} 2024-11-20T11:19:12,636 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T11:19:12,639 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T11:19:12,641 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 836 msec 2024-11-20T11:19:12,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T11:19:12,959 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-11-20T11:19:12,964 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f6e36fe to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@e98ea32 2024-11-20T11:19:12,967 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b9fcedf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:19:12,970 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:19:12,972 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48110, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:19:12,975 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T11:19:12,977 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60742, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T11:19:12,984 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f343a4d to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12885408 2024-11-20T11:19:12,988 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9bd0964, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:19:12,989 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x22cb07dd to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72b32f98 2024-11-20T11:19:12,993 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18cb251d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:19:12,994 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x478bae6b to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4977266 2024-11-20T11:19:12,997 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45b55c24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:19:12,999 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5400112e to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6bbb5d8a 2024-11-20T11:19:13,002 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e52b42a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:19:13,004 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x38766d64 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@18603bb9 2024-11-20T11:19:13,008 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3883f7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:19:13,010 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x295cb1ac to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72e97e4b 2024-11-20T11:19:13,014 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12a1285d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:19:13,015 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x70267494 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@490457fd 2024-11-20T11:19:13,018 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@527c6d40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:19:13,020 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1d2a8e08 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c8de680 2024-11-20T11:19:13,023 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47fe2fa7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:19:13,024 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c915d17 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6f6b07e3 2024-11-20T11:19:13,027 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@595e9ebe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:19:13,032 DEBUG [hconnection-0x3bca7345-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:19:13,032 DEBUG [hconnection-0x1107dd33-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:19:13,033 DEBUG [hconnection-0x2697246c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:19:13,033 DEBUG [hconnection-0x21675e13-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:19:13,035 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48114, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:19:13,036 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48122, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:19:13,036 DEBUG [hconnection-0x4fd12293-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:19:13,040 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48132, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:19:13,040 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48134, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:19:13,040 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:19:13,042 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48142, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:19:13,046 DEBUG [hconnection-0x5dc12106-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:19:13,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-11-20T11:19:13,050 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:19:13,052 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:19:13,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T11:19:13,054 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:19:13,063 DEBUG [hconnection-0x19308315-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:19:13,063 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48156, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:19:13,066 DEBUG [hconnection-0x63f71b8d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:19:13,067 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48158, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:19:13,068 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48174, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:19:13,069 DEBUG [hconnection-0x2c567894-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:19:13,077 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48184, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:19:13,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:13,116 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96d866d8db5bf8a73bb64ed0351e8f75 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T11:19:13,125 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=A 2024-11-20T11:19:13,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:13,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=B 2024-11-20T11:19:13,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:13,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=C 2024-11-20T11:19:13,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:13,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T11:19:13,215 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:13,217 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T11:19:13,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:13,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:13,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:13,219 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
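The flush of TestAcidGuarantees above is driven from the master: the client's flush request becomes a FlushTableProcedure (pid=12) with a FlushRegionProcedure subprocedure (pid=13), while the region server's MemStoreFlusher has independently started flushing all three column families A, B and C. A minimal sketch of how a client could issue the same table-level flush through the public Admin API is shown below; the standalone class and connection setup are illustrative assumptions, not code taken from the test itself.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        // Reads hbase-site.xml from the classpath; in the test the mini-cluster supplies this.
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Asks the master to flush every region of the table; the master schedules
          // this as a flush procedure like pid=12 in the log above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

Because the region is already flushing when the remote FlushRegionCallable arrives, the region server declines the procedure's flush ("NOT flushing ... as already flushing") and reports the IOException seen above back to the master, which is why pid=13 keeps being redispatched.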
2024-11-20T11:19:13,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:13,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:13,247 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/96cf97f634514f289c6d5ece56e9a3b2 is 50, key is test_row_0/A:col10/1732101553092/Put/seqid=0 2024-11-20T11:19:13,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741839_1015 (size=12001) 2024-11-20T11:19:13,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:13,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101613297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:13,314 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:13,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101613300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:13,316 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:13,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101613303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:13,318 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:13,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101613306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:13,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:13,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101613310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:13,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T11:19:13,385 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:13,386 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T11:19:13,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:13,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:13,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:13,389 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
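The Mutate calls rejected above fail with RegionTooBusyException because the region's memstore has reached its blocking limit while the flush is still in progress. The HBase client normally absorbs this exception itself and retries according to hbase.client.retries.number and hbase.client.pause; the loop below is only a sketch of what such a bounded retry with backoff amounts to if a caller handles it explicitly, and the method name, attempt count and sleep policy are assumptions.

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public class BusyRegionRetry {
      // Retries a single Put a few times when the region reports it is over its
      // memstore blocking limit, backing off a little longer on each attempt.
      static void putWithRetry(Table table, Put put, int maxAttempts) throws Exception {
        for (int attempt = 1; ; attempt++) {
          try {
            table.put(put);
            return;
          } catch (RegionTooBusyException e) {
            if (attempt >= maxAttempts) {
              throw e;                      // region stayed blocked; give up
            }
            Thread.sleep(100L * attempt);   // simple linear backoff before retrying
          }
        }
      }
    }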
2024-11-20T11:19:13,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:13,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:13,445 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:13,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101613444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:13,447 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:13,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101613446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:13,448 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:13,449 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:13,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101613446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:13,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101613446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:13,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:13,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101613444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:13,544 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:13,544 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T11:19:13,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:13,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:13,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:13,553 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
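The repeated "Over memstore limit=512.0 K" rejections come from HRegion.checkResources, visible at the top of each stack trace: a region blocks new writes once its memstore exceeds hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. A 512 KB blocking limit implies the test runs with a deliberately tiny flush size so that flushes and blocked writes happen quickly; the exact values below are assumptions chosen only to reproduce that limit, not the test's actual configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallMemstoreConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches 128 KB ...
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        // ... and block writes (RegionTooBusyException) at 4x that, i.e. 512 KB.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
      }
    }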
2024-11-20T11:19:13,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:13,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:13,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:13,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101613650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:13,655 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:13,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101613651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:13,656 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:13,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101613653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:13,657 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:13,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101613654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:13,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:13,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101613654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:13,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T11:19:13,663 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/96cf97f634514f289c6d5ece56e9a3b2 2024-11-20T11:19:13,707 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:13,708 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T11:19:13,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:13,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:13,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:13,709 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:13,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:13,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:13,752 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/ac712f963da64ffabae219239abec686 is 50, key is test_row_0/B:col10/1732101553092/Put/seqid=0 2024-11-20T11:19:13,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741840_1016 (size=12001) 2024-11-20T11:19:13,863 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:13,863 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T11:19:13,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:13,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:13,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:13,864 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:19:13,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:13,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:13,957 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:13,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101613956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:13,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:13,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101613959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:13,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:13,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101613961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:13,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:13,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101613964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:13,967 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:13,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101613965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:14,018 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:14,019 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T11:19:14,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:14,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:14,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:14,020 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:19:14,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:14,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:14,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T11:19:14,173 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:14,174 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T11:19:14,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:14,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:14,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:14,175 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:14,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:14,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:19:14,177 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/ac712f963da64ffabae219239abec686 2024-11-20T11:19:14,217 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/0c0a86b867914278ac862b162db6b491 is 50, key is test_row_0/C:col10/1732101553092/Put/seqid=0 2024-11-20T11:19:14,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741841_1017 (size=12001) 2024-11-20T11:19:14,239 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/0c0a86b867914278ac862b162db6b491 2024-11-20T11:19:14,255 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/96cf97f634514f289c6d5ece56e9a3b2 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/96cf97f634514f289c6d5ece56e9a3b2 2024-11-20T11:19:14,269 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/96cf97f634514f289c6d5ece56e9a3b2, entries=150, sequenceid=13, filesize=11.7 K 2024-11-20T11:19:14,277 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/ac712f963da64ffabae219239abec686 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/ac712f963da64ffabae219239abec686 2024-11-20T11:19:14,295 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/ac712f963da64ffabae219239abec686, entries=150, sequenceid=13, filesize=11.7 K 2024-11-20T11:19:14,297 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/0c0a86b867914278ac862b162db6b491 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/0c0a86b867914278ac862b162db6b491 2024-11-20T11:19:14,309 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/0c0a86b867914278ac862b162db6b491, entries=150, sequenceid=13, filesize=11.7 K 2024-11-20T11:19:14,311 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 96d866d8db5bf8a73bb64ed0351e8f75 in 1196ms, sequenceid=13, compaction requested=false 2024-11-20T11:19:14,313 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-20T11:19:14,315 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:14,330 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:14,330 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T11:19:14,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:14,331 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 96d866d8db5bf8a73bb64ed0351e8f75 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T11:19:14,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=A 2024-11-20T11:19:14,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:14,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=B 2024-11-20T11:19:14,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:14,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=C 2024-11-20T11:19:14,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:14,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/ed3e27bbad774df1a8562284c3e3fea0 is 50, key is test_row_0/A:col10/1732101553298/Put/seqid=0 2024-11-20T11:19:14,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741842_1018 
(size=12001) 2024-11-20T11:19:14,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:14,467 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:14,503 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:14,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101614492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:14,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:14,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101614496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:14,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:14,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101614499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:14,506 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:14,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101614503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:14,511 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:14,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101614505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:14,609 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:14,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101614607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:14,612 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:14,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:14,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101614610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:14,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101614611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:14,613 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:14,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101614613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:14,618 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:14,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101614614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:14,777 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/ed3e27bbad774df1a8562284c3e3fea0 2024-11-20T11:19:14,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/324b2b752f6446169f2ee961efa00e83 is 50, key is test_row_0/B:col10/1732101553298/Put/seqid=0 2024-11-20T11:19:14,816 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:14,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101614815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:14,818 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:14,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101614817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:14,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:14,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101614816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:14,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:14,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101614818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:14,825 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:14,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101614821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:14,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741843_1019 (size=12001) 2024-11-20T11:19:14,837 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/324b2b752f6446169f2ee961efa00e83 2024-11-20T11:19:14,842 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-20T11:19:14,844 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-11-20T11:19:14,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/9f8e86865eb64e7aa912d9575caceaac is 50, key is test_row_0/C:col10/1732101553298/Put/seqid=0 2024-11-20T11:19:14,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741844_1020 (size=12001) 2024-11-20T11:19:14,888 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), 
to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/9f8e86865eb64e7aa912d9575caceaac 2024-11-20T11:19:14,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/ed3e27bbad774df1a8562284c3e3fea0 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/ed3e27bbad774df1a8562284c3e3fea0 2024-11-20T11:19:14,926 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/ed3e27bbad774df1a8562284c3e3fea0, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T11:19:14,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/324b2b752f6446169f2ee961efa00e83 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/324b2b752f6446169f2ee961efa00e83 2024-11-20T11:19:14,942 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/324b2b752f6446169f2ee961efa00e83, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T11:19:14,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/9f8e86865eb64e7aa912d9575caceaac as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/9f8e86865eb64e7aa912d9575caceaac 2024-11-20T11:19:14,958 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/9f8e86865eb64e7aa912d9575caceaac, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T11:19:14,961 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 96d866d8db5bf8a73bb64ed0351e8f75 in 629ms, sequenceid=37, compaction requested=false 2024-11-20T11:19:14,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 
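The repeated RegionTooBusyException entries above ("Over memstore limit=512.0 K") come from HRegion.checkResources rejecting Mutate calls while the flush of region 96d866d8db5bf8a73bb64ed0351e8f75 is still in progress. In HBase the blocking memstore size is the configured flush size multiplied by hbase.hregion.memstore.block.multiplier, so the unusually low 512.0 K limit suggests the test runs with a deliberately small flush size. The Java sketch below shows one way such a limit could be configured; the concrete values (128 KB flush size, multiplier 4) are assumptions for illustration, not read from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConf {
  // Builds a configuration whose region memstore blocks writes at roughly 512 K.
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Assumed values: blocking limit = flush size * block multiplier = 128 K * 4 = 512 K.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}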
2024-11-20T11:19:14,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:14,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-20T11:19:14,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-11-20T11:19:14,968 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-11-20T11:19:14,969 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9090 sec 2024-11-20T11:19:14,972 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 1.9280 sec 2024-11-20T11:19:15,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:15,125 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96d866d8db5bf8a73bb64ed0351e8f75 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T11:19:15,125 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=A 2024-11-20T11:19:15,125 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:15,125 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=B 2024-11-20T11:19:15,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:15,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=C 2024-11-20T11:19:15,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:15,145 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/bc47c49dc08d4d908b8715a5f7c8f741 is 50, key is test_row_0/A:col10/1732101554492/Put/seqid=0 2024-11-20T11:19:15,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T11:19:15,166 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-11-20T11:19:15,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741845_1021 (size=12001) 2024-11-20T11:19:15,171 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:19:15,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): 
Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-11-20T11:19:15,174 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:19:15,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T11:19:15,175 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:19:15,176 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:19:15,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:15,177 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:15,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101615167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:15,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101615165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:15,180 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:15,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101615172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:15,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:15,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101615178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:15,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:15,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101615178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:15,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T11:19:15,284 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:15,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101615281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:15,285 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:15,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101615283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:15,286 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:15,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101615283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:15,287 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:15,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101615284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:15,288 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:15,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101615286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:15,332 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:15,333 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T11:19:15,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:15,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:15,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:15,334 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
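Each rejected Mutate above is also reported back to the caller through ipc.CallRunner with the same RegionTooBusyException. The HBase client normally retries this exception internally, but a caller driving puts by hand could apply its own backoff; the sketch below is a hypothetical illustration of that shape (the table name, attempt count and delays are assumptions, and it presumes the exception reaches the caller rather than being wrapped by the client's retry layer).

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class BackoffPut {
  // Retries a single put with exponential backoff while the region reports it is too busy.
  public static void putWithBackoff(Connection conn, Put put) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      long delayMs = 50;
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);
          return;
        } catch (RegionTooBusyException e) {
          // Memstore is over its blocking limit; give the flush time to drain it.
          Thread.sleep(delayMs);
          delayMs = Math.min(delayMs * 2, 2000);
        }
      }
      throw new RuntimeException("put still rejected after retries");
    }
  }
}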
2024-11-20T11:19:15,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:15,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:15,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T11:19:15,488 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:15,489 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T11:19:15,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:15,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:15,490 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:15,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:15,490 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:15,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101615489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:15,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:15,491 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:15,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101615489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:15,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:19:15,492 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:15,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101615491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:15,493 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:15,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101615492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:15,494 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:15,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101615493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:15,573 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/bc47c49dc08d4d908b8715a5f7c8f741 2024-11-20T11:19:15,584 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T11:19:15,595 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/995be427326a4c9cb8bfa9e50f79a54d is 50, key is test_row_0/B:col10/1732101554492/Put/seqid=0 2024-11-20T11:19:15,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741846_1022 (size=12001) 2024-11-20T11:19:15,624 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/995be427326a4c9cb8bfa9e50f79a54d 2024-11-20T11:19:15,644 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:15,645 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T11:19:15,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:15,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:15,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
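Note: the repeated RegionTooBusyException entries above show the region rejecting Mutate calls because its memstore has passed the 512.0 K blocking limit while a flush is still in progress. Below is a minimal client-side sketch of how a writer could cope with such rejections, assuming the HBase Java client API, the TestAcidGuarantees table, family A and qualifier col10; the retry count and backoff values are illustrative assumptions, not taken from this test.

// Minimal sketch (not part of the test): bounded retry around a single Put for the case where
// the server answers with RegionTooBusyException ("Over memstore limit"), as in the log above.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      IOException last = null;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);          // may surface RegionTooBusyException wrapped in an IOException
          return;                  // write accepted
        } catch (IOException e) {  // includes the retries-exhausted wrappers from the client layer
          last = e;
          Thread.sleep(100L * (attempt + 1)); // simple linear backoff while the flush catches up
        }
      }
      throw last;
    }
  }
}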
2024-11-20T11:19:15,646 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:15,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:15,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:15,650 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/b483161ec6cd43e09238aea4e83d84fa is 50, key is test_row_0/C:col10/1732101554492/Put/seqid=0 2024-11-20T11:19:15,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741847_1023 (size=12001) 2024-11-20T11:19:15,682 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/b483161ec6cd43e09238aea4e83d84fa 2024-11-20T11:19:15,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/bc47c49dc08d4d908b8715a5f7c8f741 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/bc47c49dc08d4d908b8715a5f7c8f741 2024-11-20T11:19:15,715 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/bc47c49dc08d4d908b8715a5f7c8f741, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T11:19:15,718 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/995be427326a4c9cb8bfa9e50f79a54d as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/995be427326a4c9cb8bfa9e50f79a54d 2024-11-20T11:19:15,734 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/995be427326a4c9cb8bfa9e50f79a54d, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T11:19:15,737 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/b483161ec6cd43e09238aea4e83d84fa as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/b483161ec6cd43e09238aea4e83d84fa 2024-11-20T11:19:15,756 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/b483161ec6cd43e09238aea4e83d84fa, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T11:19:15,759 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 96d866d8db5bf8a73bb64ed0351e8f75 in 634ms, sequenceid=51, compaction requested=true 2024-11-20T11:19:15,760 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:15,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:19:15,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:15,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:19:15,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:15,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:19:15,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:19:15,769 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:19:15,769 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store 
files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:19:15,774 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:15,776 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:15,776 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 96d866d8db5bf8a73bb64ed0351e8f75/A is initiating minor compaction (all files) 2024-11-20T11:19:15,776 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 96d866d8db5bf8a73bb64ed0351e8f75/B is initiating minor compaction (all files) 2024-11-20T11:19:15,776 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96d866d8db5bf8a73bb64ed0351e8f75/A in TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:15,777 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/96cf97f634514f289c6d5ece56e9a3b2, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/ed3e27bbad774df1a8562284c3e3fea0, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/bc47c49dc08d4d908b8715a5f7c8f741] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp, totalSize=35.2 K 2024-11-20T11:19:15,777 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96d866d8db5bf8a73bb64ed0351e8f75/B in TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
2024-11-20T11:19:15,778 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/ac712f963da64ffabae219239abec686, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/324b2b752f6446169f2ee961efa00e83, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/995be427326a4c9cb8bfa9e50f79a54d] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp, totalSize=35.2 K 2024-11-20T11:19:15,779 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 96cf97f634514f289c6d5ece56e9a3b2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732101553086 2024-11-20T11:19:15,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T11:19:15,780 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting ac712f963da64ffabae219239abec686, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732101553086 2024-11-20T11:19:15,781 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 324b2b752f6446169f2ee961efa00e83, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732101553298 2024-11-20T11:19:15,781 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting ed3e27bbad774df1a8562284c3e3fea0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732101553298 2024-11-20T11:19:15,784 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc47c49dc08d4d908b8715a5f7c8f741, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732101554492 2024-11-20T11:19:15,786 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 995be427326a4c9cb8bfa9e50f79a54d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732101554492 2024-11-20T11:19:15,800 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:15,801 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T11:19:15,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
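A small consistency check on the compaction selection above: each of the three flush outputs per store is reported as an 11.7 K HFile (12001 bytes, matching the size=12001 addStoredBlock entries for the flushed files), so the ExploringCompactionPolicy figure of "3 files of size 36003" is simply 3 x 12001 = 36003 bytes, i.e. 36003 / 1024 ~ 35.2 K, which is the same totalSize=35.2 K quoted when each compaction starts.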
2024-11-20T11:19:15,801 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 96d866d8db5bf8a73bb64ed0351e8f75 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T11:19:15,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=A 2024-11-20T11:19:15,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:15,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=B 2024-11-20T11:19:15,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:15,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=C 2024-11-20T11:19:15,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:15,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:15,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:15,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/b1307322d9d341aba59cc514ceb0fca0 is 50, key is test_row_0/A:col10/1732101555167/Put/seqid=0 2024-11-20T11:19:15,843 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96d866d8db5bf8a73bb64ed0351e8f75#B#compaction#10 average throughput is 0.24 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:15,844 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/331d39a94999402e9c22691f81ad90c1 is 50, key is test_row_0/B:col10/1732101554492/Put/seqid=0 2024-11-20T11:19:15,847 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96d866d8db5bf8a73bb64ed0351e8f75#A#compaction#11 average throughput is 0.36 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:15,848 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/6b592734f8244edf9e6e0bd3719b3169 is 50, key is test_row_0/A:col10/1732101554492/Put/seqid=0 2024-11-20T11:19:15,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741850_1026 (size=12104) 2024-11-20T11:19:15,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741848_1024 (size=12001) 2024-11-20T11:19:15,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741849_1025 (size=12104) 2024-11-20T11:19:15,872 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/b1307322d9d341aba59cc514ceb0fca0 2024-11-20T11:19:15,878 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:15,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101615861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:15,884 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:15,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101615870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:15,885 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:15,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101615872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:15,886 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:15,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101615874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:15,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:15,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101615879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:15,893 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/331d39a94999402e9c22691f81ad90c1 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/331d39a94999402e9c22691f81ad90c1 2024-11-20T11:19:15,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/f48086089cfc43fcb7e6bb98e7c76690 is 50, key is test_row_0/B:col10/1732101555167/Put/seqid=0 2024-11-20T11:19:15,920 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96d866d8db5bf8a73bb64ed0351e8f75/B of 96d866d8db5bf8a73bb64ed0351e8f75 into 331d39a94999402e9c22691f81ad90c1(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:19:15,921 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:15,921 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75., storeName=96d866d8db5bf8a73bb64ed0351e8f75/B, priority=13, startTime=1732101555768; duration=0sec 2024-11-20T11:19:15,921 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:19:15,921 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:B 2024-11-20T11:19:15,921 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:19:15,925 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:15,926 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 96d866d8db5bf8a73bb64ed0351e8f75/C is initiating minor compaction (all files) 2024-11-20T11:19:15,927 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96d866d8db5bf8a73bb64ed0351e8f75/C in TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:15,927 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/0c0a86b867914278ac862b162db6b491, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/9f8e86865eb64e7aa912d9575caceaac, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/b483161ec6cd43e09238aea4e83d84fa] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp, totalSize=35.2 K 2024-11-20T11:19:15,928 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c0a86b867914278ac862b162db6b491, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732101553086 2024-11-20T11:19:15,929 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f8e86865eb64e7aa912d9575caceaac, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732101553298 2024-11-20T11:19:15,930 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting b483161ec6cd43e09238aea4e83d84fa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732101554492 2024-11-20T11:19:15,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is 
added to blk_1073741851_1027 (size=12001) 2024-11-20T11:19:15,935 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/f48086089cfc43fcb7e6bb98e7c76690 2024-11-20T11:19:15,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/5ac974337d134411a8df9765aeca66ec is 50, key is test_row_0/C:col10/1732101555167/Put/seqid=0 2024-11-20T11:19:15,963 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96d866d8db5bf8a73bb64ed0351e8f75#C#compaction#13 average throughput is 0.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:15,964 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/5a0dfb0c00104d8dbd81d0fe64a7f6a9 is 50, key is test_row_0/C:col10/1732101554492/Put/seqid=0 2024-11-20T11:19:15,983 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:15,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101615982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:15,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741853_1029 (size=12104) 2024-11-20T11:19:15,988 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:15,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101615987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:15,991 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:15,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101615989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:15,992 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:15,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101615990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:16,001 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:16,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101615993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:16,002 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/5a0dfb0c00104d8dbd81d0fe64a7f6a9 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/5a0dfb0c00104d8dbd81d0fe64a7f6a9 2024-11-20T11:19:16,017 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96d866d8db5bf8a73bb64ed0351e8f75/C of 96d866d8db5bf8a73bb64ed0351e8f75 into 5a0dfb0c00104d8dbd81d0fe64a7f6a9(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
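Note on where the 512.0 K figure in the exceptions comes from: HBase blocks writes to a region with RegionTooBusyException once its memstore exceeds hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The actual values used by this test run are not visible in this excerpt; the sketch below shows one hypothetical combination that would produce the limit seen here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed values, not read from this log: a 128 KB flush size with the default multiplier of 4
    // gives a blocking limit of 131072 * 4 = 524288 bytes = 512.0 K, matching the exception message.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
  }
}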
2024-11-20T11:19:16,017 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:16,017 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75., storeName=96d866d8db5bf8a73bb64ed0351e8f75/C, priority=13, startTime=1732101555768; duration=0sec 2024-11-20T11:19:16,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741852_1028 (size=12001) 2024-11-20T11:19:16,019 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:16,019 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:C 2024-11-20T11:19:16,021 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/5ac974337d134411a8df9765aeca66ec 2024-11-20T11:19:16,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/b1307322d9d341aba59cc514ceb0fca0 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/b1307322d9d341aba59cc514ceb0fca0 2024-11-20T11:19:16,046 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/b1307322d9d341aba59cc514ceb0fca0, entries=150, sequenceid=74, filesize=11.7 K 2024-11-20T11:19:16,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/f48086089cfc43fcb7e6bb98e7c76690 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/f48086089cfc43fcb7e6bb98e7c76690 2024-11-20T11:19:16,065 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/f48086089cfc43fcb7e6bb98e7c76690, entries=150, sequenceid=74, filesize=11.7 K 2024-11-20T11:19:16,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/5ac974337d134411a8df9765aeca66ec as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/5ac974337d134411a8df9765aeca66ec 2024-11-20T11:19:16,086 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/5ac974337d134411a8df9765aeca66ec, entries=150, sequenceid=74, filesize=11.7 K 2024-11-20T11:19:16,088 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 96d866d8db5bf8a73bb64ed0351e8f75 in 286ms, sequenceid=74, compaction requested=false 2024-11-20T11:19:16,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:16,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:16,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-20T11:19:16,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-11-20T11:19:16,094 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-11-20T11:19:16,094 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 915 msec 2024-11-20T11:19:16,098 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 925 msec 2024-11-20T11:19:16,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:16,190 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96d866d8db5bf8a73bb64ed0351e8f75 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T11:19:16,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=A 2024-11-20T11:19:16,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:16,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=B 2024-11-20T11:19:16,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:16,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
96d866d8db5bf8a73bb64ed0351e8f75, store=C 2024-11-20T11:19:16,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:16,207 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/27bb6028c1ce4e59a311daf174ce6012 is 50, key is test_row_0/A:col10/1732101555851/Put/seqid=0 2024-11-20T11:19:16,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741854_1030 (size=14341) 2024-11-20T11:19:16,242 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:16,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101616234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:16,243 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:16,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101616235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:16,244 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:16,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101616235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:16,245 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:16,246 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/27bb6028c1ce4e59a311daf174ce6012 2024-11-20T11:19:16,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101616239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:16,250 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:16,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101616243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:16,271 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/16ff31a0809841d0ab2e69dfcecacc95 is 50, key is test_row_0/B:col10/1732101555851/Put/seqid=0 2024-11-20T11:19:16,281 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/6b592734f8244edf9e6e0bd3719b3169 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/6b592734f8244edf9e6e0bd3719b3169 2024-11-20T11:19:16,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T11:19:16,282 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-11-20T11:19:16,286 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:19:16,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-11-20T11:19:16,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T11:19:16,290 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:19:16,292 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:19:16,292 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:19:16,297 INFO 
[RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96d866d8db5bf8a73bb64ed0351e8f75/A of 96d866d8db5bf8a73bb64ed0351e8f75 into 6b592734f8244edf9e6e0bd3719b3169(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:19:16,298 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:16,298 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75., storeName=96d866d8db5bf8a73bb64ed0351e8f75/A, priority=13, startTime=1732101555761; duration=0sec 2024-11-20T11:19:16,298 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:16,298 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:A 2024-11-20T11:19:16,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741855_1031 (size=12001) 2024-11-20T11:19:16,328 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/16ff31a0809841d0ab2e69dfcecacc95 2024-11-20T11:19:16,348 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/2f5523e9ca104a08a4c9ec683408e875 is 50, key is test_row_0/C:col10/1732101555851/Put/seqid=0 2024-11-20T11:19:16,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:16,349 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:16,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101616345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:16,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101616347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:16,350 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:16,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101616348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:16,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:16,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101616348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:16,357 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:16,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101616353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:16,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741856_1032 (size=12001) 2024-11-20T11:19:16,360 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/2f5523e9ca104a08a4c9ec683408e875 2024-11-20T11:19:16,371 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/27bb6028c1ce4e59a311daf174ce6012 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/27bb6028c1ce4e59a311daf174ce6012 2024-11-20T11:19:16,379 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/27bb6028c1ce4e59a311daf174ce6012, entries=200, sequenceid=91, filesize=14.0 K 2024-11-20T11:19:16,381 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/16ff31a0809841d0ab2e69dfcecacc95 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/16ff31a0809841d0ab2e69dfcecacc95 2024-11-20T11:19:16,391 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T11:19:16,393 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/16ff31a0809841d0ab2e69dfcecacc95, entries=150, sequenceid=91, filesize=11.7 K 2024-11-20T11:19:16,395 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/2f5523e9ca104a08a4c9ec683408e875 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/2f5523e9ca104a08a4c9ec683408e875 2024-11-20T11:19:16,410 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/2f5523e9ca104a08a4c9ec683408e875, entries=150, sequenceid=91, filesize=11.7 K 2024-11-20T11:19:16,412 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 96d866d8db5bf8a73bb64ed0351e8f75 in 222ms, sequenceid=91, compaction requested=true 2024-11-20T11:19:16,412 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:16,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:19:16,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:16,413 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:19:16,413 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:19:16,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:19:16,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:16,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:19:16,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:19:16,416 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 
starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:16,416 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38446 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:16,416 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 96d866d8db5bf8a73bb64ed0351e8f75/B is initiating minor compaction (all files) 2024-11-20T11:19:16,416 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 96d866d8db5bf8a73bb64ed0351e8f75/A is initiating minor compaction (all files) 2024-11-20T11:19:16,416 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96d866d8db5bf8a73bb64ed0351e8f75/B in TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:16,416 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96d866d8db5bf8a73bb64ed0351e8f75/A in TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:16,416 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/331d39a94999402e9c22691f81ad90c1, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/f48086089cfc43fcb7e6bb98e7c76690, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/16ff31a0809841d0ab2e69dfcecacc95] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp, totalSize=35.3 K 2024-11-20T11:19:16,416 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/6b592734f8244edf9e6e0bd3719b3169, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/b1307322d9d341aba59cc514ceb0fca0, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/27bb6028c1ce4e59a311daf174ce6012] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp, totalSize=37.5 K 2024-11-20T11:19:16,417 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T11:19:16,417 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-20T11:19:16,420 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-11-20T11:19:16,420 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-11-20T11:19:16,421 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T11:19:16,421 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-20T11:19:16,423 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b592734f8244edf9e6e0bd3719b3169, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732101554492 2024-11-20T11:19:16,423 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-20T11:19:16,423 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 331d39a94999402e9c22691f81ad90c1, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732101554492 2024-11-20T11:19:16,424 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting b1307322d9d341aba59cc514ceb0fca0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1732101555164 2024-11-20T11:19:16,424 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting f48086089cfc43fcb7e6bb98e7c76690, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1732101555164 2024-11-20T11:19:16,425 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 27bb6028c1ce4e59a311daf174ce6012, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732101555851 2024-11-20T11:19:16,425 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 16ff31a0809841d0ab2e69dfcecacc95, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732101555851 2024-11-20T11:19:16,432 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-20T11:19:16,434 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T11:19:16,434 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-20T11:19:16,446 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:16,446 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T11:19:16,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 
{event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:16,447 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 96d866d8db5bf8a73bb64ed0351e8f75 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T11:19:16,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=A 2024-11-20T11:19:16,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:16,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=B 2024-11-20T11:19:16,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:16,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=C 2024-11-20T11:19:16,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:16,464 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96d866d8db5bf8a73bb64ed0351e8f75#A#compaction#18 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:16,465 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/f40ccbc811a848e3ad4292df02ae3ecb is 50, key is test_row_0/A:col10/1732101555851/Put/seqid=0 2024-11-20T11:19:16,471 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96d866d8db5bf8a73bb64ed0351e8f75#B#compaction#19 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:16,472 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/5fe9af9ce8c0442a8f34977ba23d82ed is 50, key is test_row_0/B:col10/1732101555851/Put/seqid=0 2024-11-20T11:19:16,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/2664d85c802443aeb9f839f1551cb0b7 is 50, key is test_row_0/A:col10/1732101556228/Put/seqid=0 2024-11-20T11:19:16,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741857_1033 (size=12207) 2024-11-20T11:19:16,502 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/f40ccbc811a848e3ad4292df02ae3ecb as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/f40ccbc811a848e3ad4292df02ae3ecb 2024-11-20T11:19:16,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741858_1034 (size=12001) 2024-11-20T11:19:16,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741859_1035 (size=12207) 2024-11-20T11:19:16,525 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96d866d8db5bf8a73bb64ed0351e8f75/A of 96d866d8db5bf8a73bb64ed0351e8f75 into f40ccbc811a848e3ad4292df02ae3ecb(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
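The repeated RegionTooBusyException warnings in this run are the region server rejecting writes while region 96d866d8db5bf8a73bb64ed0351e8f75 is over its memstore blocking limit (512.0 K); the flushes logged here drain the memstore, and the writer is expected to back off and retry the Put. The following is a minimal client-side sketch of that retry pattern, not part of the test output: the table name, row key, and A:col10 column are taken from the log, while the class name, value bytes, retry budget, and backoff are illustrative assumptions, and it assumes RegionTooBusyException reaches the caller directly rather than being wrapped by the client's own internal retries.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
    // Hypothetical retry budget and base backoff; not taken from the test itself.
    private static final int MAX_ATTEMPTS = 5;
    private static final long BACKOFF_MS = 200;

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            // Column family A, qualifier col10, as seen in the flushed HFile keys above.
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            putWithBackoff(table, put);
        }
    }

    // Retry a single Put while the region reports it is over its memstore limit.
    static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
        for (int attempt = 1; ; attempt++) {
            try {
                table.put(put);
                return;
            } catch (RegionTooBusyException e) {
                if (attempt >= MAX_ATTEMPTS) {
                    throw e; // give up once the retry budget is spent
                }
                // Exponential backoff to let the pending flush drain the memstore.
                Thread.sleep(BACKOFF_MS * (1L << (attempt - 1)));
            }
        }
    }
}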
2024-11-20T11:19:16,525 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:16,526 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75., storeName=96d866d8db5bf8a73bb64ed0351e8f75/A, priority=13, startTime=1732101556412; duration=0sec 2024-11-20T11:19:16,526 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:19:16,526 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:A 2024-11-20T11:19:16,526 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:19:16,530 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:16,531 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 96d866d8db5bf8a73bb64ed0351e8f75/C is initiating minor compaction (all files) 2024-11-20T11:19:16,531 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96d866d8db5bf8a73bb64ed0351e8f75/C in TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:16,531 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/5a0dfb0c00104d8dbd81d0fe64a7f6a9, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/5ac974337d134411a8df9765aeca66ec, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/2f5523e9ca104a08a4c9ec683408e875] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp, totalSize=35.3 K 2024-11-20T11:19:16,536 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/5fe9af9ce8c0442a8f34977ba23d82ed as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/5fe9af9ce8c0442a8f34977ba23d82ed 2024-11-20T11:19:16,537 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5a0dfb0c00104d8dbd81d0fe64a7f6a9, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732101554492 2024-11-20T11:19:16,561 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5ac974337d134411a8df9765aeca66ec, 
keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1732101555164 2024-11-20T11:19:16,562 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2f5523e9ca104a08a4c9ec683408e875, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732101555851 2024-11-20T11:19:16,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:16,567 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:16,573 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96d866d8db5bf8a73bb64ed0351e8f75/B of 96d866d8db5bf8a73bb64ed0351e8f75 into 5fe9af9ce8c0442a8f34977ba23d82ed(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:19:16,573 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:16,573 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75., storeName=96d866d8db5bf8a73bb64ed0351e8f75/B, priority=13, startTime=1732101556413; duration=0sec 2024-11-20T11:19:16,573 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:16,574 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:B 2024-11-20T11:19:16,584 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96d866d8db5bf8a73bb64ed0351e8f75#C#compaction#21 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:16,585 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/c52084e8243a430ba9e32945a0592a41 is 50, key is test_row_0/C:col10/1732101555851/Put/seqid=0 2024-11-20T11:19:16,590 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:16,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:16,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101616581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:16,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101616583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:16,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T11:19:16,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:16,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:16,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101616587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:16,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101616589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:16,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:16,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101616589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:16,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741860_1036 (size=12207) 2024-11-20T11:19:16,633 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/c52084e8243a430ba9e32945a0592a41 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/c52084e8243a430ba9e32945a0592a41 2024-11-20T11:19:16,646 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96d866d8db5bf8a73bb64ed0351e8f75/C of 96d866d8db5bf8a73bb64ed0351e8f75 into c52084e8243a430ba9e32945a0592a41(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:19:16,647 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:16,647 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75., storeName=96d866d8db5bf8a73bb64ed0351e8f75/C, priority=13, startTime=1732101556413; duration=0sec 2024-11-20T11:19:16,647 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:16,647 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:C 2024-11-20T11:19:16,694 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:16,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101616693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:16,696 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:16,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101616695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:16,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:16,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101616696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:16,698 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:16,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101616697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:16,699 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:16,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101616697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:16,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T11:19:16,898 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:16,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101616897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:16,902 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:16,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101616899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:16,904 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:16,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101616900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:16,905 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:16,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101616901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:16,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:16,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101616902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:16,913 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/2664d85c802443aeb9f839f1551cb0b7 2024-11-20T11:19:16,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/1c61dda16a7b45bfa021df97c532d811 is 50, key is test_row_0/B:col10/1732101556228/Put/seqid=0 2024-11-20T11:19:16,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741861_1037 (size=12001) 2024-11-20T11:19:16,980 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/1c61dda16a7b45bfa021df97c532d811 2024-11-20T11:19:16,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/a106e912abf94927a9140e96df23dbc0 is 50, key is test_row_0/C:col10/1732101556228/Put/seqid=0 2024-11-20T11:19:17,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741862_1038 (size=12001) 2024-11-20T11:19:17,024 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/a106e912abf94927a9140e96df23dbc0 2024-11-20T11:19:17,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/2664d85c802443aeb9f839f1551cb0b7 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/2664d85c802443aeb9f839f1551cb0b7 2024-11-20T11:19:17,049 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/2664d85c802443aeb9f839f1551cb0b7, entries=150, sequenceid=113, filesize=11.7 K 2024-11-20T11:19:17,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/1c61dda16a7b45bfa021df97c532d811 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/1c61dda16a7b45bfa021df97c532d811 2024-11-20T11:19:17,061 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/1c61dda16a7b45bfa021df97c532d811, entries=150, sequenceid=113, filesize=11.7 K 2024-11-20T11:19:17,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/a106e912abf94927a9140e96df23dbc0 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/a106e912abf94927a9140e96df23dbc0 2024-11-20T11:19:17,070 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/a106e912abf94927a9140e96df23dbc0, entries=150, sequenceid=113, filesize=11.7 K 2024-11-20T11:19:17,071 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 96d866d8db5bf8a73bb64ed0351e8f75 in 624ms, sequenceid=113, compaction requested=false 2024-11-20T11:19:17,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:17,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
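The RegionTooBusyException entries above are raised by HRegion.checkResources when the region's memstore grows past its blocking limit (reported here as 512.0 K); writes are rejected until the in-flight flush, whose completion is logged just above, drains the memstore. In stock HBase that blocking limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, so the small 512 K figure suggests this test runs with a deliberately tiny flush size. A minimal sketch of how those two settings combine (the class name and the 128 K flush size are illustrative assumptions, not values read from this test's configuration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed test-style settings; the stock defaults are 128 MB and 4 respectively.
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
            conf.setLong("hbase.hregion.memstore.block.multiplier", 4);
            long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L)
                * conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
            // 128 K * 4 = 512 K, matching "Over memstore limit=512.0 K" in the entries above.
            System.out.println("Blocking memstore size: " + blocking + " bytes");
        }
    }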
2024-11-20T11:19:17,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-20T11:19:17,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-11-20T11:19:17,077 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-11-20T11:19:17,077 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 782 msec 2024-11-20T11:19:17,080 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 792 msec 2024-11-20T11:19:17,205 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96d866d8db5bf8a73bb64ed0351e8f75 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-20T11:19:17,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=A 2024-11-20T11:19:17,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:17,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=B 2024-11-20T11:19:17,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:17,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:17,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=C 2024-11-20T11:19:17,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:17,223 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/6b15e454cffe4bcab9c5a3d2e4a81192 is 50, key is test_row_0/A:col10/1732101557202/Put/seqid=0 2024-11-20T11:19:17,242 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:17,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741863_1039 (size=12101) 2024-11-20T11:19:17,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101617235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:17,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:17,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101617235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:17,243 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:17,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101617237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:17,247 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:17,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101617243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:17,252 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:17,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101617249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:17,253 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/6b15e454cffe4bcab9c5a3d2e4a81192 2024-11-20T11:19:17,268 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/cdbc1388f4804682b4738ad7ba260f0e is 50, key is test_row_0/B:col10/1732101557202/Put/seqid=0 2024-11-20T11:19:17,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741864_1040 (size=12101) 2024-11-20T11:19:17,295 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/cdbc1388f4804682b4738ad7ba260f0e 2024-11-20T11:19:17,310 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/dc2a523ea4dd418694906295b887f54d is 50, key is test_row_0/C:col10/1732101557202/Put/seqid=0 2024-11-20T11:19:17,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741865_1041 (size=12101) 2024-11-20T11:19:17,349 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:17,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101617345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:17,352 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:17,352 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:17,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101617346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:17,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101617345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:17,353 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:17,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101617348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:17,356 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:17,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101617354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:17,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T11:19:17,395 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-11-20T11:19:17,397 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:19:17,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-11-20T11:19:17,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T11:19:17,399 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:19:17,400 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:19:17,401 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:19:17,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T11:19:17,553 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:17,553 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:17,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101617552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:17,554 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T11:19:17,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:17,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:17,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:17,555 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
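The flush requested by pid=19 fails immediately above because the region reports "NOT flushing ... as already flushing", so FlushRegionCallable returns an IOException to the master, which logs the remote procedure failure and can reschedule it; meanwhile client Mutate calls keep being pushed back with RegionTooBusyException until the running flush completes. A writer driving this kind of workload typically just backs off and retries the put. The sketch below shows one way to do that (class name, retry counts, and the client tuning are illustrative assumptions; the server-side RegionTooBusyException generally surfaces to the caller as an IOException once the client's own retries are exhausted):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetryExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Keep the client's built-in retries short so a busy region is seen quickly (assumed tuning).
            conf.setInt("hbase.client.retries.number", 3);
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                // Row/family/qualifier mirror the keys visible in the log (test_row_0, A:col10).
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;
                for (int attempt = 0; attempt < 5; attempt++) {
                    try {
                        table.put(put);
                        break;              // write accepted
                    } catch (IOException e) {
                        // Most likely the region is over its blocking memstore size; wait for the flush.
                        Thread.sleep(backoffMs);
                        backoffMs *= 2;     // simple exponential backoff
                    }
                }
            }
        }
    }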
2024-11-20T11:19:17,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:17,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:17,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:17,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101617555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:17,559 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:17,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101617556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:17,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:17,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101617560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:17,562 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:17,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101617561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:17,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T11:19:17,710 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:17,710 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T11:19:17,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:17,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:17,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:17,711 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:17,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:17,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:17,732 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/dc2a523ea4dd418694906295b887f54d 2024-11-20T11:19:17,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/6b15e454cffe4bcab9c5a3d2e4a81192 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/6b15e454cffe4bcab9c5a3d2e4a81192 2024-11-20T11:19:17,751 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/6b15e454cffe4bcab9c5a3d2e4a81192, entries=150, sequenceid=133, filesize=11.8 K 2024-11-20T11:19:17,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/cdbc1388f4804682b4738ad7ba260f0e as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/cdbc1388f4804682b4738ad7ba260f0e 2024-11-20T11:19:17,760 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/cdbc1388f4804682b4738ad7ba260f0e, entries=150, sequenceid=133, filesize=11.8 K 2024-11-20T11:19:17,761 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/dc2a523ea4dd418694906295b887f54d as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/dc2a523ea4dd418694906295b887f54d 2024-11-20T11:19:17,770 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/dc2a523ea4dd418694906295b887f54d, entries=150, sequenceid=133, filesize=11.8 K 2024-11-20T11:19:17,771 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for 96d866d8db5bf8a73bb64ed0351e8f75 in 567ms, sequenceid=133, compaction requested=true 2024-11-20T11:19:17,771 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:17,772 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:19:17,772 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:17,772 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:19:17,772 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:19:17,772 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:19:17,773 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:17,774 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:17,774 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:17,774 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 96d866d8db5bf8a73bb64ed0351e8f75/B is initiating minor compaction (all files) 2024-11-20T11:19:17,774 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 96d866d8db5bf8a73bb64ed0351e8f75/A is initiating minor compaction (all files) 2024-11-20T11:19:17,774 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96d866d8db5bf8a73bb64ed0351e8f75/B in TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:17,774 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96d866d8db5bf8a73bb64ed0351e8f75/A in TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
2024-11-20T11:19:17,774 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/5fe9af9ce8c0442a8f34977ba23d82ed, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/1c61dda16a7b45bfa021df97c532d811, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/cdbc1388f4804682b4738ad7ba260f0e] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp, totalSize=35.5 K 2024-11-20T11:19:17,774 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/f40ccbc811a848e3ad4292df02ae3ecb, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/2664d85c802443aeb9f839f1551cb0b7, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/6b15e454cffe4bcab9c5a3d2e4a81192] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp, totalSize=35.5 K 2024-11-20T11:19:17,775 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:19:17,775 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:19:17,775 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting f40ccbc811a848e3ad4292df02ae3ecb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732101555851 2024-11-20T11:19:17,775 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 5fe9af9ce8c0442a8f34977ba23d82ed, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732101555851 2024-11-20T11:19:17,776 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c61dda16a7b45bfa021df97c532d811, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1732101556228 2024-11-20T11:19:17,776 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2664d85c802443aeb9f839f1551cb0b7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1732101556228 2024-11-20T11:19:17,777 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting cdbc1388f4804682b4738ad7ba260f0e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732101556584 2024-11-20T11:19:17,777 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 
{}] compactions.Compactor(224): Compacting 6b15e454cffe4bcab9c5a3d2e4a81192, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732101556584 2024-11-20T11:19:17,795 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96d866d8db5bf8a73bb64ed0351e8f75#B#compaction#27 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:17,796 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/43565c5c242e4e81ba2806c1bd21cf37 is 50, key is test_row_0/B:col10/1732101557202/Put/seqid=0 2024-11-20T11:19:17,801 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96d866d8db5bf8a73bb64ed0351e8f75#A#compaction#28 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:17,802 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/c04b4850729e450ca5ae7121d3b1c732 is 50, key is test_row_0/A:col10/1732101557202/Put/seqid=0 2024-11-20T11:19:17,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741866_1042 (size=12409) 2024-11-20T11:19:17,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741867_1043 (size=12409) 2024-11-20T11:19:17,837 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/43565c5c242e4e81ba2806c1bd21cf37 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/43565c5c242e4e81ba2806c1bd21cf37 2024-11-20T11:19:17,840 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/c04b4850729e450ca5ae7121d3b1c732 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/c04b4850729e450ca5ae7121d3b1c732 2024-11-20T11:19:17,852 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96d866d8db5bf8a73bb64ed0351e8f75/B of 96d866d8db5bf8a73bb64ed0351e8f75 into 43565c5c242e4e81ba2806c1bd21cf37(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:19:17,852 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:17,852 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75., storeName=96d866d8db5bf8a73bb64ed0351e8f75/B, priority=13, startTime=1732101557772; duration=0sec 2024-11-20T11:19:17,853 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:19:17,853 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:B 2024-11-20T11:19:17,853 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:19:17,855 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:17,855 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96d866d8db5bf8a73bb64ed0351e8f75/A of 96d866d8db5bf8a73bb64ed0351e8f75 into c04b4850729e450ca5ae7121d3b1c732(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:19:17,855 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 96d866d8db5bf8a73bb64ed0351e8f75/C is initiating minor compaction (all files) 2024-11-20T11:19:17,855 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96d866d8db5bf8a73bb64ed0351e8f75/C in TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
2024-11-20T11:19:17,855 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:17,855 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75., storeName=96d866d8db5bf8a73bb64ed0351e8f75/A, priority=13, startTime=1732101557772; duration=0sec 2024-11-20T11:19:17,855 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/c52084e8243a430ba9e32945a0592a41, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/a106e912abf94927a9140e96df23dbc0, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/dc2a523ea4dd418694906295b887f54d] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp, totalSize=35.5 K 2024-11-20T11:19:17,855 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:17,856 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:A 2024-11-20T11:19:17,856 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting c52084e8243a430ba9e32945a0592a41, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732101555851 2024-11-20T11:19:17,857 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting a106e912abf94927a9140e96df23dbc0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1732101556228 2024-11-20T11:19:17,858 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting dc2a523ea4dd418694906295b887f54d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732101556584 2024-11-20T11:19:17,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:17,864 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:17,865 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96d866d8db5bf8a73bb64ed0351e8f75 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T11:19:17,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=A 2024-11-20T11:19:17,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:17,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=B 2024-11-20T11:19:17,865 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:17,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=C 2024-11-20T11:19:17,866 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:17,866 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T11:19:17,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:17,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:17,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:17,867 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:17,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:17,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:17,875 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96d866d8db5bf8a73bb64ed0351e8f75#C#compaction#30 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:17,875 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/8fc35313950a4d17bb4753076099bf8b is 50, key is test_row_0/A:col10/1732101557240/Put/seqid=0 2024-11-20T11:19:17,876 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/7f6f26df187b4ab489ed3412de7be0a1 is 50, key is test_row_0/C:col10/1732101557202/Put/seqid=0 2024-11-20T11:19:17,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:17,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101617884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:17,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:17,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101617887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:17,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:17,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101617886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:17,894 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:17,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101617891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:17,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741868_1044 (size=12151) 2024-11-20T11:19:17,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:17,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101617892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:17,900 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/8fc35313950a4d17bb4753076099bf8b 2024-11-20T11:19:17,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741869_1045 (size=12409) 2024-11-20T11:19:17,938 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/f64eec5ae9c9465ab9a88cce385ae183 is 50, key is test_row_0/B:col10/1732101557240/Put/seqid=0 2024-11-20T11:19:17,941 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/7f6f26df187b4ab489ed3412de7be0a1 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/7f6f26df187b4ab489ed3412de7be0a1 2024-11-20T11:19:17,952 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96d866d8db5bf8a73bb64ed0351e8f75/C of 96d866d8db5bf8a73bb64ed0351e8f75 into 7f6f26df187b4ab489ed3412de7be0a1(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:19:17,952 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:17,952 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75., storeName=96d866d8db5bf8a73bb64ed0351e8f75/C, priority=13, startTime=1732101557774; duration=0sec 2024-11-20T11:19:17,953 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:17,953 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:C 2024-11-20T11:19:17,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741870_1046 (size=12151) 2024-11-20T11:19:17,960 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/f64eec5ae9c9465ab9a88cce385ae183 2024-11-20T11:19:17,982 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/9ca317d7e6954a049a987a70500014a7 is 50, key is test_row_0/C:col10/1732101557240/Put/seqid=0 2024-11-20T11:19:17,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741871_1047 (size=12151) 2024-11-20T11:19:17,994 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:17,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101617993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:17,994 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:17,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101617993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:17,996 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:17,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101617994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:17,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:17,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101617996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:18,001 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:18,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101618001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:18,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T11:19:18,020 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:18,021 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T11:19:18,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:18,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:18,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:18,022 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:18,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:18,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:18,174 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:18,175 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T11:19:18,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:18,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:18,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:18,176 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:18,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:18,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:18,202 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:18,202 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:18,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101618200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:18,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101618197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:18,202 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:18,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101618201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:18,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:18,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101618202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:18,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:18,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101618204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:18,330 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:18,331 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T11:19:18,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:18,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:18,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:18,331 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:18,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:18,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:18,395 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/9ca317d7e6954a049a987a70500014a7 2024-11-20T11:19:18,404 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/8fc35313950a4d17bb4753076099bf8b as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/8fc35313950a4d17bb4753076099bf8b 2024-11-20T11:19:18,413 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/8fc35313950a4d17bb4753076099bf8b, entries=150, sequenceid=157, filesize=11.9 K 2024-11-20T11:19:18,419 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/f64eec5ae9c9465ab9a88cce385ae183 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/f64eec5ae9c9465ab9a88cce385ae183 2024-11-20T11:19:18,434 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/f64eec5ae9c9465ab9a88cce385ae183, entries=150, sequenceid=157, filesize=11.9 K 2024-11-20T11:19:18,437 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/9ca317d7e6954a049a987a70500014a7 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/9ca317d7e6954a049a987a70500014a7 2024-11-20T11:19:18,447 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/9ca317d7e6954a049a987a70500014a7, entries=150, sequenceid=157, filesize=11.9 K 2024-11-20T11:19:18,450 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 96d866d8db5bf8a73bb64ed0351e8f75 in 586ms, sequenceid=157, compaction requested=false 2024-11-20T11:19:18,450 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:18,489 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:18,490 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T11:19:18,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:18,491 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 96d866d8db5bf8a73bb64ed0351e8f75 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T11:19:18,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=A 2024-11-20T11:19:18,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:18,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=B 2024-11-20T11:19:18,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:18,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=C 2024-11-20T11:19:18,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:18,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/1e2fd32782e746e0826a682ff2596fdf is 50, key is test_row_0/A:col10/1732101557885/Put/seqid=0 2024-11-20T11:19:18,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T11:19:18,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:18,507 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
as already flushing 2024-11-20T11:19:18,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741872_1048 (size=12151) 2024-11-20T11:19:18,520 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/1e2fd32782e746e0826a682ff2596fdf 2024-11-20T11:19:18,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:18,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101618538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:18,543 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:18,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101618538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:18,543 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:18,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101618540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:18,544 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:18,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101618540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:18,544 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:18,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101618540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:18,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/15772da65d694dd0b2f982883bdaef5c is 50, key is test_row_0/B:col10/1732101557885/Put/seqid=0 2024-11-20T11:19:18,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741873_1049 (size=12151) 2024-11-20T11:19:18,567 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/15772da65d694dd0b2f982883bdaef5c 2024-11-20T11:19:18,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/86cbf769fdcd483f9e942e532a5b91e2 is 50, key is test_row_0/C:col10/1732101557885/Put/seqid=0 2024-11-20T11:19:18,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741874_1050 (size=12151) 2024-11-20T11:19:18,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:18,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101618644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:18,646 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:18,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101618645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:18,647 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:18,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101618646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:18,647 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:18,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101618647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:18,646 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:18,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101618645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:18,850 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:18,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101618849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:18,852 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:18,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:18,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101618849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:18,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101618849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:18,855 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:18,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101618852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:18,854 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:18,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101618852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:19,032 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/86cbf769fdcd483f9e942e532a5b91e2 2024-11-20T11:19:19,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/1e2fd32782e746e0826a682ff2596fdf as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/1e2fd32782e746e0826a682ff2596fdf 2024-11-20T11:19:19,052 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/1e2fd32782e746e0826a682ff2596fdf, entries=150, sequenceid=173, filesize=11.9 K 2024-11-20T11:19:19,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/15772da65d694dd0b2f982883bdaef5c as 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/15772da65d694dd0b2f982883bdaef5c 2024-11-20T11:19:19,063 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/15772da65d694dd0b2f982883bdaef5c, entries=150, sequenceid=173, filesize=11.9 K 2024-11-20T11:19:19,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/86cbf769fdcd483f9e942e532a5b91e2 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/86cbf769fdcd483f9e942e532a5b91e2 2024-11-20T11:19:19,073 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/86cbf769fdcd483f9e942e532a5b91e2, entries=150, sequenceid=173, filesize=11.9 K 2024-11-20T11:19:19,075 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 96d866d8db5bf8a73bb64ed0351e8f75 in 585ms, sequenceid=173, compaction requested=true 2024-11-20T11:19:19,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:19,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
2024-11-20T11:19:19,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-11-20T11:19:19,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-11-20T11:19:19,081 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-11-20T11:19:19,081 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6760 sec 2024-11-20T11:19:19,083 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 1.6850 sec 2024-11-20T11:19:19,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:19,158 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96d866d8db5bf8a73bb64ed0351e8f75 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T11:19:19,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=A 2024-11-20T11:19:19,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:19,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=B 2024-11-20T11:19:19,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:19,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=C 2024-11-20T11:19:19,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:19,167 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/5f396fe4595246599e26991c151e2b8f is 50, key is test_row_0/A:col10/1732101558538/Put/seqid=0 2024-11-20T11:19:19,171 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:19,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:19,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101619169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:19,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101619170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:19,172 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:19,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101619170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:19,173 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:19,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101619172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:19,174 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:19,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101619172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:19,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741875_1051 (size=14541) 2024-11-20T11:19:19,208 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/5f396fe4595246599e26991c151e2b8f 2024-11-20T11:19:19,234 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/74f1cb6d238a4c6fae0ff147dadfcd6c is 50, key is test_row_0/B:col10/1732101558538/Put/seqid=0 2024-11-20T11:19:19,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741876_1052 (size=12151) 2024-11-20T11:19:19,260 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/74f1cb6d238a4c6fae0ff147dadfcd6c 2024-11-20T11:19:19,276 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:19,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101619275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:19,277 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:19,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101619275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:19,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:19,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101619276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:19,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:19,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101619277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:19,282 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:19,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101619278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:19,285 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/ce9a99256c5245e39daab6e2394f3215 is 50, key is test_row_0/C:col10/1732101558538/Put/seqid=0 2024-11-20T11:19:19,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741877_1053 (size=12151) 2024-11-20T11:19:19,480 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:19,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101619480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:19,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:19,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101619481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:19,488 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:19,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101619480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:19,489 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:19,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101619484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:19,489 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:19,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101619485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:19,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T11:19:19,506 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-11-20T11:19:19,508 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:19:19,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-11-20T11:19:19,511 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:19:19,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T11:19:19,513 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:19:19,513 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:19:19,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure 
is done pid=20 2024-11-20T11:19:19,666 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:19,667 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T11:19:19,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:19,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:19,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:19,667 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:19,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:19:19,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:19:19,700 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/ce9a99256c5245e39daab6e2394f3215 2024-11-20T11:19:19,708 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/5f396fe4595246599e26991c151e2b8f as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/5f396fe4595246599e26991c151e2b8f 2024-11-20T11:19:19,717 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/5f396fe4595246599e26991c151e2b8f, entries=200, sequenceid=195, filesize=14.2 K 2024-11-20T11:19:19,719 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/74f1cb6d238a4c6fae0ff147dadfcd6c as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/74f1cb6d238a4c6fae0ff147dadfcd6c 2024-11-20T11:19:19,727 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/74f1cb6d238a4c6fae0ff147dadfcd6c, entries=150, sequenceid=195, filesize=11.9 K 2024-11-20T11:19:19,729 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/ce9a99256c5245e39daab6e2394f3215 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/ce9a99256c5245e39daab6e2394f3215 2024-11-20T11:19:19,738 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/ce9a99256c5245e39daab6e2394f3215, entries=150, sequenceid=195, filesize=11.9 K 2024-11-20T11:19:19,739 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 96d866d8db5bf8a73bb64ed0351e8f75 in 581ms, sequenceid=195, compaction requested=true 2024-11-20T11:19:19,739 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:19,739 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:19:19,739 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:19,739 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:19:19,739 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T11:19:19,739 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:19:19,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:19:19,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T11:19:19,740 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T11:19:19,743 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51252 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T11:19:19,743 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 96d866d8db5bf8a73bb64ed0351e8f75/A is initiating minor compaction (all files) 2024-11-20T11:19:19,743 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96d866d8db5bf8a73bb64ed0351e8f75/A in TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
2024-11-20T11:19:19,743 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/c04b4850729e450ca5ae7121d3b1c732, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/8fc35313950a4d17bb4753076099bf8b, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/1e2fd32782e746e0826a682ff2596fdf, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/5f396fe4595246599e26991c151e2b8f] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp, totalSize=50.1 K 2024-11-20T11:19:19,744 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48862 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T11:19:19,744 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 96d866d8db5bf8a73bb64ed0351e8f75/B is initiating minor compaction (all files) 2024-11-20T11:19:19,744 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96d866d8db5bf8a73bb64ed0351e8f75/B in TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:19,744 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/43565c5c242e4e81ba2806c1bd21cf37, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/f64eec5ae9c9465ab9a88cce385ae183, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/15772da65d694dd0b2f982883bdaef5c, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/74f1cb6d238a4c6fae0ff147dadfcd6c] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp, totalSize=47.7 K 2024-11-20T11:19:19,745 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting c04b4850729e450ca5ae7121d3b1c732, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732101556584 2024-11-20T11:19:19,746 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 43565c5c242e4e81ba2806c1bd21cf37, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732101556584 2024-11-20T11:19:19,746 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8fc35313950a4d17bb4753076099bf8b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, 
earliestPutTs=1732101557235 2024-11-20T11:19:19,747 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting f64eec5ae9c9465ab9a88cce385ae183, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732101557235 2024-11-20T11:19:19,747 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e2fd32782e746e0826a682ff2596fdf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732101557885 2024-11-20T11:19:19,748 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 15772da65d694dd0b2f982883bdaef5c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732101557885 2024-11-20T11:19:19,748 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5f396fe4595246599e26991c151e2b8f, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732101558523 2024-11-20T11:19:19,749 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 74f1cb6d238a4c6fae0ff147dadfcd6c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732101558523 2024-11-20T11:19:19,782 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96d866d8db5bf8a73bb64ed0351e8f75#A#compaction#39 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:19,782 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96d866d8db5bf8a73bb64ed0351e8f75#B#compaction#40 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:19,783 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/696f8e03bcb6471f89f2e4f2c5ff18ea is 50, key is test_row_0/A:col10/1732101558538/Put/seqid=0 2024-11-20T11:19:19,784 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/b6fc4bd5857a4effb7c8bdfb5e5a6a75 is 50, key is test_row_0/B:col10/1732101558538/Put/seqid=0 2024-11-20T11:19:19,788 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96d866d8db5bf8a73bb64ed0351e8f75 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T11:19:19,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=A 2024-11-20T11:19:19,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:19,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=B 2024-11-20T11:19:19,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:19,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=C 2024-11-20T11:19:19,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:19,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:19,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741878_1054 (size=12595) 2024-11-20T11:19:19,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T11:19:19,821 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:19,821 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T11:19:19,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:19,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:19,822 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:19,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:19,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
2024-11-20T11:19:19,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101619813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:19,822 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:19,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101619814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:19,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:19,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:19,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101619816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:19,823 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/b42bcc3d62f84c94b852287350d0f07f is 50, key is test_row_0/A:col10/1732101559161/Put/seqid=0 2024-11-20T11:19:19,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:19,826 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/696f8e03bcb6471f89f2e4f2c5ff18ea as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/696f8e03bcb6471f89f2e4f2c5ff18ea 2024-11-20T11:19:19,827 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:19,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101619822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:19,827 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:19,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101619825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:19,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741879_1055 (size=12595) 2024-11-20T11:19:19,840 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 96d866d8db5bf8a73bb64ed0351e8f75/A of 96d866d8db5bf8a73bb64ed0351e8f75 into 696f8e03bcb6471f89f2e4f2c5ff18ea(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:19:19,840 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:19,840 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75., storeName=96d866d8db5bf8a73bb64ed0351e8f75/A, priority=12, startTime=1732101559739; duration=0sec 2024-11-20T11:19:19,841 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:19:19,841 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:A 2024-11-20T11:19:19,841 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T11:19:19,847 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48862 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T11:19:19,847 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 96d866d8db5bf8a73bb64ed0351e8f75/C is initiating minor compaction (all files) 2024-11-20T11:19:19,847 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96d866d8db5bf8a73bb64ed0351e8f75/C in TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:19,847 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/7f6f26df187b4ab489ed3412de7be0a1, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/9ca317d7e6954a049a987a70500014a7, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/86cbf769fdcd483f9e942e532a5b91e2, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/ce9a99256c5245e39daab6e2394f3215] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp, totalSize=47.7 K 2024-11-20T11:19:19,848 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7f6f26df187b4ab489ed3412de7be0a1, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732101556584 2024-11-20T11:19:19,848 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/b6fc4bd5857a4effb7c8bdfb5e5a6a75 as 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/b6fc4bd5857a4effb7c8bdfb5e5a6a75 2024-11-20T11:19:19,849 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9ca317d7e6954a049a987a70500014a7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732101557235 2024-11-20T11:19:19,850 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 86cbf769fdcd483f9e942e532a5b91e2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732101557885 2024-11-20T11:19:19,852 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting ce9a99256c5245e39daab6e2394f3215, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732101558523 2024-11-20T11:19:19,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741880_1056 (size=12151) 2024-11-20T11:19:19,863 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/b42bcc3d62f84c94b852287350d0f07f 2024-11-20T11:19:19,863 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 96d866d8db5bf8a73bb64ed0351e8f75/B of 96d866d8db5bf8a73bb64ed0351e8f75 into b6fc4bd5857a4effb7c8bdfb5e5a6a75(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:19:19,863 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:19,863 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75., storeName=96d866d8db5bf8a73bb64ed0351e8f75/B, priority=12, startTime=1732101559739; duration=0sec 2024-11-20T11:19:19,864 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:19,865 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:B 2024-11-20T11:19:19,878 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96d866d8db5bf8a73bb64ed0351e8f75#C#compaction#42 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:19,878 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/d96112497c1d4540946c4b205050d275 is 50, key is test_row_0/C:col10/1732101558538/Put/seqid=0 2024-11-20T11:19:19,890 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/074fd5365c794660b55b14ced76d002b is 50, key is test_row_0/B:col10/1732101559161/Put/seqid=0 2024-11-20T11:19:19,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741881_1057 (size=12595) 2024-11-20T11:19:19,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741882_1058 (size=12151) 2024-11-20T11:19:19,919 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/074fd5365c794660b55b14ced76d002b 2024-11-20T11:19:19,927 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:19,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101619924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:19,928 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:19,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101619925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:19,929 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:19,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101619925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:19,931 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:19,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101619928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:19,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:19,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101619931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:19,938 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/a8844b5c620d487dac249e271f0083d8 is 50, key is test_row_0/C:col10/1732101559161/Put/seqid=0 2024-11-20T11:19:19,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741883_1059 (size=12151) 2024-11-20T11:19:19,978 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:19,978 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T11:19:19,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:19,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:19,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
2024-11-20T11:19:19,979 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:19,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:19,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:20,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T11:19:20,132 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:20,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:20,133 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T11:19:20,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101620131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:20,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:20,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:20,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:20,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:20,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101620132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:20,133 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:20,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:19:20,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:20,135 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:20,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101620133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:20,136 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:20,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101620135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:20,136 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:20,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101620135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:20,287 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:20,288 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T11:19:20,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:20,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:20,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:20,288 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:20,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:20,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:20,316 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/d96112497c1d4540946c4b205050d275 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/d96112497c1d4540946c4b205050d275 2024-11-20T11:19:20,327 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 96d866d8db5bf8a73bb64ed0351e8f75/C of 96d866d8db5bf8a73bb64ed0351e8f75 into d96112497c1d4540946c4b205050d275(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:19:20,328 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:20,328 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75., storeName=96d866d8db5bf8a73bb64ed0351e8f75/C, priority=12, startTime=1732101559740; duration=0sec 2024-11-20T11:19:20,328 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:20,328 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:C 2024-11-20T11:19:20,365 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/a8844b5c620d487dac249e271f0083d8 2024-11-20T11:19:20,376 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/b42bcc3d62f84c94b852287350d0f07f as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/b42bcc3d62f84c94b852287350d0f07f 2024-11-20T11:19:20,383 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/b42bcc3d62f84c94b852287350d0f07f, entries=150, sequenceid=211, filesize=11.9 K 2024-11-20T11:19:20,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/074fd5365c794660b55b14ced76d002b as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/074fd5365c794660b55b14ced76d002b 2024-11-20T11:19:20,392 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/074fd5365c794660b55b14ced76d002b, entries=150, sequenceid=211, filesize=11.9 K 2024-11-20T11:19:20,393 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/a8844b5c620d487dac249e271f0083d8 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/a8844b5c620d487dac249e271f0083d8 2024-11-20T11:19:20,400 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/a8844b5c620d487dac249e271f0083d8, entries=150, sequenceid=211, filesize=11.9 K 2024-11-20T11:19:20,402 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 96d866d8db5bf8a73bb64ed0351e8f75 in 614ms, sequenceid=211, compaction requested=false 2024-11-20T11:19:20,402 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:20,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:20,439 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96d866d8db5bf8a73bb64ed0351e8f75 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T11:19:20,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=A 2024-11-20T11:19:20,440 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:20,440 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=B 2024-11-20T11:19:20,440 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:20,440 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=C 2024-11-20T11:19:20,440 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:20,447 DEBUG [RSProcedureDispatcher-pool-1 {}] 
master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:20,448 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T11:19:20,448 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/4d0ff98e8c8c46c18d7dd899c12d8aa8 is 50, key is test_row_0/A:col10/1732101560437/Put/seqid=0 2024-11-20T11:19:20,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:20,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:20,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:20,448 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:20,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:20,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:20,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:20,460 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:20,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101620454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:20,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101620456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:20,461 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:20,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101620456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:20,463 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:20,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101620460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:20,464 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:20,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101620460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:20,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741884_1060 (size=12151) 2024-11-20T11:19:20,562 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:20,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101620562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:20,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:20,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101620562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:20,567 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:20,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101620565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:20,566 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:20,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101620563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:20,567 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:20,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101620565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:20,601 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:20,602 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T11:19:20,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
2024-11-20T11:19:20,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:20,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:20,602 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:20,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:19:20,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:20,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T11:19:20,755 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:20,756 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T11:19:20,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
2024-11-20T11:19:20,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:20,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:20,756 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:20,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:19:20,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:20,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:20,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101620764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:20,770 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:20,770 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:20,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101620769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:20,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101620769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:20,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:20,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101620770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:20,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:20,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101620771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:20,867 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/4d0ff98e8c8c46c18d7dd899c12d8aa8 2024-11-20T11:19:20,882 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/1f94ed39ac4e48faab94cd454a8b609c is 50, key is test_row_0/B:col10/1732101560437/Put/seqid=0 2024-11-20T11:19:20,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741885_1061 (size=12151) 2024-11-20T11:19:20,899 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/1f94ed39ac4e48faab94cd454a8b609c 2024-11-20T11:19:20,911 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:20,913 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T11:19:20,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:20,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:20,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:20,913 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:20,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:19:20,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:19:20,916 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/5e19a9ee8e6b4b5894e220a54348e32c is 50, key is test_row_0/C:col10/1732101560437/Put/seqid=0 2024-11-20T11:19:20,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741886_1062 (size=12151) 2024-11-20T11:19:20,937 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/5e19a9ee8e6b4b5894e220a54348e32c 2024-11-20T11:19:20,946 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/4d0ff98e8c8c46c18d7dd899c12d8aa8 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/4d0ff98e8c8c46c18d7dd899c12d8aa8 2024-11-20T11:19:20,955 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/4d0ff98e8c8c46c18d7dd899c12d8aa8, entries=150, sequenceid=235, filesize=11.9 K 2024-11-20T11:19:20,957 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/1f94ed39ac4e48faab94cd454a8b609c as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/1f94ed39ac4e48faab94cd454a8b609c 2024-11-20T11:19:20,968 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/1f94ed39ac4e48faab94cd454a8b609c, entries=150, sequenceid=235, filesize=11.9 K 2024-11-20T11:19:20,969 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/5e19a9ee8e6b4b5894e220a54348e32c as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/5e19a9ee8e6b4b5894e220a54348e32c 2024-11-20T11:19:20,978 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/5e19a9ee8e6b4b5894e220a54348e32c, entries=150, sequenceid=235, filesize=11.9 K 2024-11-20T11:19:20,980 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 96d866d8db5bf8a73bb64ed0351e8f75 in 541ms, sequenceid=235, 
compaction requested=true 2024-11-20T11:19:20,980 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:20,981 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:19:20,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:19:20,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:20,981 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:19:20,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:19:20,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:20,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:19:20,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:19:20,983 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:20,983 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 96d866d8db5bf8a73bb64ed0351e8f75/A is initiating minor compaction (all files) 2024-11-20T11:19:20,983 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96d866d8db5bf8a73bb64ed0351e8f75/A in TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
2024-11-20T11:19:20,983 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/696f8e03bcb6471f89f2e4f2c5ff18ea, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/b42bcc3d62f84c94b852287350d0f07f, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/4d0ff98e8c8c46c18d7dd899c12d8aa8] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp, totalSize=36.0 K 2024-11-20T11:19:20,984 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:20,984 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 696f8e03bcb6471f89f2e4f2c5ff18ea, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732101558523 2024-11-20T11:19:20,984 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 96d866d8db5bf8a73bb64ed0351e8f75/B is initiating minor compaction (all files) 2024-11-20T11:19:20,984 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96d866d8db5bf8a73bb64ed0351e8f75/B in TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
2024-11-20T11:19:20,984 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/b6fc4bd5857a4effb7c8bdfb5e5a6a75, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/074fd5365c794660b55b14ced76d002b, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/1f94ed39ac4e48faab94cd454a8b609c] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp, totalSize=36.0 K 2024-11-20T11:19:20,985 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting b42bcc3d62f84c94b852287350d0f07f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732101559161 2024-11-20T11:19:20,985 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4d0ff98e8c8c46c18d7dd899c12d8aa8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732101559813 2024-11-20T11:19:20,985 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting b6fc4bd5857a4effb7c8bdfb5e5a6a75, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732101558523 2024-11-20T11:19:20,986 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 074fd5365c794660b55b14ced76d002b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732101559161 2024-11-20T11:19:20,987 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f94ed39ac4e48faab94cd454a8b609c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732101559813 2024-11-20T11:19:21,000 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96d866d8db5bf8a73bb64ed0351e8f75#A#compaction#48 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:21,001 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/2a1d8d0e26ec44179496cdcedd24773d is 50, key is test_row_0/A:col10/1732101560437/Put/seqid=0 2024-11-20T11:19:21,012 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96d866d8db5bf8a73bb64ed0351e8f75#B#compaction#49 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:21,013 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/4308e971c0fe46b6b68c158125e0256c is 50, key is test_row_0/B:col10/1732101560437/Put/seqid=0 2024-11-20T11:19:21,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741887_1063 (size=12697) 2024-11-20T11:19:21,045 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/2a1d8d0e26ec44179496cdcedd24773d as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/2a1d8d0e26ec44179496cdcedd24773d 2024-11-20T11:19:21,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741888_1064 (size=12697) 2024-11-20T11:19:21,056 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96d866d8db5bf8a73bb64ed0351e8f75/A of 96d866d8db5bf8a73bb64ed0351e8f75 into 2a1d8d0e26ec44179496cdcedd24773d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:19:21,056 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:21,056 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75., storeName=96d866d8db5bf8a73bb64ed0351e8f75/A, priority=13, startTime=1732101560980; duration=0sec 2024-11-20T11:19:21,056 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:19:21,056 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:A 2024-11-20T11:19:21,058 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:19:21,060 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:21,060 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 96d866d8db5bf8a73bb64ed0351e8f75/C is initiating minor compaction (all files) 2024-11-20T11:19:21,061 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96d866d8db5bf8a73bb64ed0351e8f75/C in TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
2024-11-20T11:19:21,061 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/d96112497c1d4540946c4b205050d275, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/a8844b5c620d487dac249e271f0083d8, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/5e19a9ee8e6b4b5894e220a54348e32c] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp, totalSize=36.0 K 2024-11-20T11:19:21,063 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting d96112497c1d4540946c4b205050d275, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732101558523 2024-11-20T11:19:21,064 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting a8844b5c620d487dac249e271f0083d8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732101559161 2024-11-20T11:19:21,065 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e19a9ee8e6b4b5894e220a54348e32c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732101559813 2024-11-20T11:19:21,066 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:21,068 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T11:19:21,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
2024-11-20T11:19:21,069 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 96d866d8db5bf8a73bb64ed0351e8f75 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T11:19:21,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=A 2024-11-20T11:19:21,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:21,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=B 2024-11-20T11:19:21,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:21,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=C 2024-11-20T11:19:21,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:21,070 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/4308e971c0fe46b6b68c158125e0256c as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/4308e971c0fe46b6b68c158125e0256c 2024-11-20T11:19:21,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:21,071 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:21,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/7db94e993c234214bd6f4168eb887018 is 50, key is test_row_0/A:col10/1732101560458/Put/seqid=0 2024-11-20T11:19:21,081 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96d866d8db5bf8a73bb64ed0351e8f75/B of 96d866d8db5bf8a73bb64ed0351e8f75 into 4308e971c0fe46b6b68c158125e0256c(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:19:21,081 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:21,081 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75., storeName=96d866d8db5bf8a73bb64ed0351e8f75/B, priority=13, startTime=1732101560981; duration=0sec 2024-11-20T11:19:21,082 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:21,082 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:B 2024-11-20T11:19:21,092 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96d866d8db5bf8a73bb64ed0351e8f75#C#compaction#51 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:21,093 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/86fc5325ade043e98c78b9c588ea75fc is 50, key is test_row_0/C:col10/1732101560437/Put/seqid=0 2024-11-20T11:19:21,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741889_1065 (size=12151) 2024-11-20T11:19:21,101 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:21,102 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:21,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101621096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:21,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101621095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:21,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:21,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101621101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:21,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:21,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101621101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:21,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:21,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101621102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:21,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741890_1066 (size=12697) 2024-11-20T11:19:21,123 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/86fc5325ade043e98c78b9c588ea75fc as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/86fc5325ade043e98c78b9c588ea75fc 2024-11-20T11:19:21,134 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96d866d8db5bf8a73bb64ed0351e8f75/C of 96d866d8db5bf8a73bb64ed0351e8f75 into 86fc5325ade043e98c78b9c588ea75fc(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:19:21,134 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:21,134 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75., storeName=96d866d8db5bf8a73bb64ed0351e8f75/C, priority=13, startTime=1732101560982; duration=0sec 2024-11-20T11:19:21,134 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:21,135 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:C 2024-11-20T11:19:21,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:21,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101621204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:21,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:21,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101621204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:21,208 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:21,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101621207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:21,211 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:21,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101621211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:21,212 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:21,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101621211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:21,414 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:21,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101621412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:21,415 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:21,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101621412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:21,415 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:21,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101621413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:21,416 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:21,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101621413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:21,417 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:21,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101621415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:21,501 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/7db94e993c234214bd6f4168eb887018 2024-11-20T11:19:21,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/baa86b0e39ff479abf77e3adb8d9536e is 50, key is test_row_0/B:col10/1732101560458/Put/seqid=0 2024-11-20T11:19:21,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741891_1067 (size=12151) 2024-11-20T11:19:21,530 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/baa86b0e39ff479abf77e3adb8d9536e 2024-11-20T11:19:21,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/6e7ce4c64877462ebebe6260ee072b56 is 50, key is test_row_0/C:col10/1732101560458/Put/seqid=0 2024-11-20T11:19:21,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741892_1068 (size=12151) 2024-11-20T11:19:21,564 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/6e7ce4c64877462ebebe6260ee072b56 2024-11-20T11:19:21,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/7db94e993c234214bd6f4168eb887018 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/7db94e993c234214bd6f4168eb887018 2024-11-20T11:19:21,583 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/7db94e993c234214bd6f4168eb887018, entries=150, sequenceid=251, filesize=11.9 K 2024-11-20T11:19:21,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/baa86b0e39ff479abf77e3adb8d9536e as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/baa86b0e39ff479abf77e3adb8d9536e 2024-11-20T11:19:21,593 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/baa86b0e39ff479abf77e3adb8d9536e, entries=150, sequenceid=251, filesize=11.9 K 2024-11-20T11:19:21,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/6e7ce4c64877462ebebe6260ee072b56 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/6e7ce4c64877462ebebe6260ee072b56 2024-11-20T11:19:21,603 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/6e7ce4c64877462ebebe6260ee072b56, entries=150, sequenceid=251, filesize=11.9 K 2024-11-20T11:19:21,605 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 96d866d8db5bf8a73bb64ed0351e8f75 in 536ms, sequenceid=251, compaction requested=false 2024-11-20T11:19:21,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:21,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
2024-11-20T11:19:21,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-11-20T11:19:21,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-11-20T11:19:21,611 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-11-20T11:19:21,611 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0940 sec 2024-11-20T11:19:21,614 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 2.1040 sec 2024-11-20T11:19:21,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T11:19:21,618 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-11-20T11:19:21,620 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:19:21,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-11-20T11:19:21,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T11:19:21,623 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:19:21,624 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:19:21,624 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:19:21,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T11:19:21,725 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96d866d8db5bf8a73bb64ed0351e8f75 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T11:19:21,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=A 2024-11-20T11:19:21,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:21,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=B 2024-11-20T11:19:21,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-20T11:19:21,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=C 2024-11-20T11:19:21,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:21,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:21,741 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:21,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101621737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:21,742 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:21,742 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:21,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101621738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:21,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101621737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:21,742 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/614791872a244a5781f47c585f847768 is 50, key is test_row_0/A:col10/1732101561717/Put/seqid=0 2024-11-20T11:19:21,742 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:21,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101621740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:21,744 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:21,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101621741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:21,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741893_1069 (size=12301) 2024-11-20T11:19:21,764 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/614791872a244a5781f47c585f847768 2024-11-20T11:19:21,778 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/62bb63ba65ff4f1ba22e1370d2334cb7 is 50, key is test_row_0/B:col10/1732101561717/Put/seqid=0 2024-11-20T11:19:21,779 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:21,779 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T11:19:21,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:21,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:21,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:21,780 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:21,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:21,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:21,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741894_1070 (size=12301) 2024-11-20T11:19:21,803 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/62bb63ba65ff4f1ba22e1370d2334cb7 2024-11-20T11:19:21,813 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/96ef7e49a3db45e6b036c2d68b2e6659 is 50, key is test_row_0/C:col10/1732101561717/Put/seqid=0 2024-11-20T11:19:21,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741895_1071 (size=12301) 2024-11-20T11:19:21,818 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/96ef7e49a3db45e6b036c2d68b2e6659 2024-11-20T11:19:21,825 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/614791872a244a5781f47c585f847768 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/614791872a244a5781f47c585f847768 2024-11-20T11:19:21,831 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/614791872a244a5781f47c585f847768, entries=150, sequenceid=276, filesize=12.0 K 2024-11-20T11:19:21,833 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/62bb63ba65ff4f1ba22e1370d2334cb7 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/62bb63ba65ff4f1ba22e1370d2334cb7 2024-11-20T11:19:21,839 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/62bb63ba65ff4f1ba22e1370d2334cb7, entries=150, sequenceid=276, filesize=12.0 K 2024-11-20T11:19:21,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/96ef7e49a3db45e6b036c2d68b2e6659 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/96ef7e49a3db45e6b036c2d68b2e6659 2024-11-20T11:19:21,850 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/96ef7e49a3db45e6b036c2d68b2e6659, entries=150, sequenceid=276, filesize=12.0 K 2024-11-20T11:19:21,852 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 96d866d8db5bf8a73bb64ed0351e8f75 in 127ms, sequenceid=276, compaction requested=true 2024-11-20T11:19:21,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:21,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:19:21,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:21,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:19:21,852 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:19:21,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:21,852 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 
2024-11-20T11:19:21,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:19:21,853 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:19:21,854 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:21,854 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:21,854 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 96d866d8db5bf8a73bb64ed0351e8f75/A is initiating minor compaction (all files) 2024-11-20T11:19:21,854 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 96d866d8db5bf8a73bb64ed0351e8f75/B is initiating minor compaction (all files) 2024-11-20T11:19:21,854 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96d866d8db5bf8a73bb64ed0351e8f75/A in TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:21,854 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96d866d8db5bf8a73bb64ed0351e8f75/B in TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
2024-11-20T11:19:21,854 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/2a1d8d0e26ec44179496cdcedd24773d, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/7db94e993c234214bd6f4168eb887018, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/614791872a244a5781f47c585f847768] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp, totalSize=36.3 K 2024-11-20T11:19:21,854 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/4308e971c0fe46b6b68c158125e0256c, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/baa86b0e39ff479abf77e3adb8d9536e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/62bb63ba65ff4f1ba22e1370d2334cb7] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp, totalSize=36.3 K 2024-11-20T11:19:21,854 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2a1d8d0e26ec44179496cdcedd24773d, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732101559813 2024-11-20T11:19:21,855 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 4308e971c0fe46b6b68c158125e0256c, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732101559813 2024-11-20T11:19:21,855 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7db94e993c234214bd6f4168eb887018, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732101560452 2024-11-20T11:19:21,855 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting baa86b0e39ff479abf77e3adb8d9536e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732101560452 2024-11-20T11:19:21,855 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 614791872a244a5781f47c585f847768, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732101561091 2024-11-20T11:19:21,855 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 62bb63ba65ff4f1ba22e1370d2334cb7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732101561091 2024-11-20T11:19:21,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:21,862 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2837): Flushing 96d866d8db5bf8a73bb64ed0351e8f75 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T11:19:21,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=A 2024-11-20T11:19:21,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:21,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=B 2024-11-20T11:19:21,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:21,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=C 2024-11-20T11:19:21,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:21,879 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96d866d8db5bf8a73bb64ed0351e8f75#B#compaction#57 average throughput is 0.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:21,880 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/bf7773ba5c42487fb048d0ef74a06ec9 is 50, key is test_row_0/B:col10/1732101561717/Put/seqid=0 2024-11-20T11:19:21,884 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96d866d8db5bf8a73bb64ed0351e8f75#A#compaction#58 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:21,886 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/29ffe433ee814188b39da1f7ba079908 is 50, key is test_row_0/A:col10/1732101561717/Put/seqid=0 2024-11-20T11:19:21,894 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/67b2bee4864a42b1919fe729aedd78ee is 50, key is test_row_0/A:col10/1732101561732/Put/seqid=0 2024-11-20T11:19:21,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741896_1072 (size=12949) 2024-11-20T11:19:21,919 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/bf7773ba5c42487fb048d0ef74a06ec9 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/bf7773ba5c42487fb048d0ef74a06ec9 2024-11-20T11:19:21,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T11:19:21,926 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96d866d8db5bf8a73bb64ed0351e8f75/B of 96d866d8db5bf8a73bb64ed0351e8f75 into bf7773ba5c42487fb048d0ef74a06ec9(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:19:21,926 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:21,926 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75., storeName=96d866d8db5bf8a73bb64ed0351e8f75/B, priority=13, startTime=1732101561852; duration=0sec 2024-11-20T11:19:21,926 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:19:21,926 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:B 2024-11-20T11:19:21,927 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:19:21,928 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:21,928 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 96d866d8db5bf8a73bb64ed0351e8f75/C is initiating minor compaction (all files) 2024-11-20T11:19:21,928 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96d866d8db5bf8a73bb64ed0351e8f75/C in TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:21,928 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/86fc5325ade043e98c78b9c588ea75fc, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/6e7ce4c64877462ebebe6260ee072b56, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/96ef7e49a3db45e6b036c2d68b2e6659] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp, totalSize=36.3 K 2024-11-20T11:19:21,929 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 86fc5325ade043e98c78b9c588ea75fc, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732101559813 2024-11-20T11:19:21,929 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e7ce4c64877462ebebe6260ee072b56, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732101560452 2024-11-20T11:19:21,930 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 96ef7e49a3db45e6b036c2d68b2e6659, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732101561091 2024-11-20T11:19:21,933 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:21,934 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T11:19:21,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:21,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:21,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:21,934 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:21,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:19:21,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:21,936 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:21,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101621915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:21,937 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:21,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101621917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:21,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741897_1073 (size=12949) 2024-11-20T11:19:21,949 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:21,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101621932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:21,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:21,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101621932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:21,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:21,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101621936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:21,957 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/29ffe433ee814188b39da1f7ba079908 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/29ffe433ee814188b39da1f7ba079908 2024-11-20T11:19:21,964 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96d866d8db5bf8a73bb64ed0351e8f75/A of 96d866d8db5bf8a73bb64ed0351e8f75 into 29ffe433ee814188b39da1f7ba079908(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:19:21,964 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:21,964 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75., storeName=96d866d8db5bf8a73bb64ed0351e8f75/A, priority=13, startTime=1732101561852; duration=0sec 2024-11-20T11:19:21,964 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:21,964 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:A 2024-11-20T11:19:21,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741898_1074 (size=17181) 2024-11-20T11:19:21,970 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/67b2bee4864a42b1919fe729aedd78ee 2024-11-20T11:19:21,982 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/213eff694aa943e1aee2d8c148bab137 is 50, key is test_row_0/B:col10/1732101561732/Put/seqid=0 2024-11-20T11:19:21,994 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96d866d8db5bf8a73bb64ed0351e8f75#C#compaction#60 average throughput is 0.39 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:21,995 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/1d11a2a86b474be8ada819459efbb6a0 is 50, key is test_row_0/C:col10/1732101561717/Put/seqid=0 2024-11-20T11:19:22,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741899_1075 (size=12301) 2024-11-20T11:19:22,007 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/213eff694aa943e1aee2d8c148bab137 2024-11-20T11:19:22,026 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/9f6bf422607c4ec88f6cdbed2b376c01 is 50, key is test_row_0/C:col10/1732101561732/Put/seqid=0 2024-11-20T11:19:22,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741900_1076 (size=12949) 2024-11-20T11:19:22,061 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:22,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101622057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:22,062 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:22,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101622058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:22,063 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:22,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101622059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:22,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741901_1077 (size=12301) 2024-11-20T11:19:22,063 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:22,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101622059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:22,064 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/1d11a2a86b474be8ada819459efbb6a0 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/1d11a2a86b474be8ada819459efbb6a0 2024-11-20T11:19:22,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:22,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101622059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:22,072 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96d866d8db5bf8a73bb64ed0351e8f75/C of 96d866d8db5bf8a73bb64ed0351e8f75 into 1d11a2a86b474be8ada819459efbb6a0(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:19:22,072 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:22,072 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75., storeName=96d866d8db5bf8a73bb64ed0351e8f75/C, priority=13, startTime=1732101561852; duration=0sec 2024-11-20T11:19:22,072 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:22,072 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:C 2024-11-20T11:19:22,087 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:22,087 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T11:19:22,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:22,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:22,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
2024-11-20T11:19:22,088 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:22,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:22,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:22,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T11:19:22,241 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:22,242 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T11:19:22,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:22,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:22,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:22,243 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:22,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:22,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:22,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:22,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101622263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:22,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:22,265 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:22,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101622265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:22,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101622265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:22,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:22,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101622268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:22,269 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:22,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101622268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:22,396 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:22,396 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T11:19:22,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:22,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:22,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:22,397 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:22,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:22,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:22,464 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/9f6bf422607c4ec88f6cdbed2b376c01 2024-11-20T11:19:22,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/67b2bee4864a42b1919fe729aedd78ee as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/67b2bee4864a42b1919fe729aedd78ee 2024-11-20T11:19:22,479 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/67b2bee4864a42b1919fe729aedd78ee, entries=250, sequenceid=293, filesize=16.8 K 2024-11-20T11:19:22,480 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/213eff694aa943e1aee2d8c148bab137 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/213eff694aa943e1aee2d8c148bab137 2024-11-20T11:19:22,487 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/213eff694aa943e1aee2d8c148bab137, entries=150, 
sequenceid=293, filesize=12.0 K 2024-11-20T11:19:22,489 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/9f6bf422607c4ec88f6cdbed2b376c01 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/9f6bf422607c4ec88f6cdbed2b376c01 2024-11-20T11:19:22,495 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/9f6bf422607c4ec88f6cdbed2b376c01, entries=150, sequenceid=293, filesize=12.0 K 2024-11-20T11:19:22,497 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 96d866d8db5bf8a73bb64ed0351e8f75 in 635ms, sequenceid=293, compaction requested=false 2024-11-20T11:19:22,497 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:22,549 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:22,550 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T11:19:22,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
2024-11-20T11:19:22,550 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 96d866d8db5bf8a73bb64ed0351e8f75 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-20T11:19:22,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=A 2024-11-20T11:19:22,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:22,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=B 2024-11-20T11:19:22,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:22,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=C 2024-11-20T11:19:22,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:22,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/50fb24cc835d469ea284f5bfbe700f9b is 50, key is test_row_0/A:col10/1732101561932/Put/seqid=0 2024-11-20T11:19:22,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:22,571 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:22,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:22,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:22,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101622582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:22,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101622583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:22,587 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:22,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741902_1078 (size=12301) 2024-11-20T11:19:22,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101622584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:22,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:22,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101622586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:22,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:22,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101622588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:22,594 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/50fb24cc835d469ea284f5bfbe700f9b 2024-11-20T11:19:22,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/1b0bb4abea144f1ab6199df1e8c5824e is 50, key is test_row_0/B:col10/1732101561932/Put/seqid=0 2024-11-20T11:19:22,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741903_1079 (size=12301) 2024-11-20T11:19:22,634 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/1b0bb4abea144f1ab6199df1e8c5824e 2024-11-20T11:19:22,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/4626c1292a1b410484a7b1d74754688e is 50, key is test_row_0/C:col10/1732101561932/Put/seqid=0 2024-11-20T11:19:22,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741904_1080 (size=12301) 2024-11-20T11:19:22,666 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/4626c1292a1b410484a7b1d74754688e 2024-11-20T11:19:22,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/50fb24cc835d469ea284f5bfbe700f9b as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/50fb24cc835d469ea284f5bfbe700f9b 2024-11-20T11:19:22,686 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/50fb24cc835d469ea284f5bfbe700f9b, entries=150, sequenceid=315, filesize=12.0 K 2024-11-20T11:19:22,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/1b0bb4abea144f1ab6199df1e8c5824e as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/1b0bb4abea144f1ab6199df1e8c5824e 2024-11-20T11:19:22,692 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:22,692 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:22,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101622688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:22,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101622688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:22,693 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:22,693 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:22,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101622691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:22,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101622689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:22,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:22,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101622691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:22,701 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/1b0bb4abea144f1ab6199df1e8c5824e, entries=150, sequenceid=315, filesize=12.0 K 2024-11-20T11:19:22,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/4626c1292a1b410484a7b1d74754688e as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/4626c1292a1b410484a7b1d74754688e 2024-11-20T11:19:22,711 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/4626c1292a1b410484a7b1d74754688e, entries=150, sequenceid=315, filesize=12.0 K 2024-11-20T11:19:22,713 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 96d866d8db5bf8a73bb64ed0351e8f75 in 163ms, sequenceid=315, compaction requested=true 2024-11-20T11:19:22,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:22,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
2024-11-20T11:19:22,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-11-20T11:19:22,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-11-20T11:19:22,717 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-11-20T11:19:22,717 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0900 sec 2024-11-20T11:19:22,719 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 1.0980 sec 2024-11-20T11:19:22,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T11:19:22,727 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-11-20T11:19:22,729 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:19:22,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-11-20T11:19:22,731 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:19:22,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T11:19:22,732 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:19:22,733 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:19:22,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T11:19:22,885 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:22,885 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T11:19:22,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
2024-11-20T11:19:22,886 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 96d866d8db5bf8a73bb64ed0351e8f75 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-20T11:19:22,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=A 2024-11-20T11:19:22,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:22,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=B 2024-11-20T11:19:22,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:22,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=C 2024-11-20T11:19:22,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:22,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/b2dd27461dc843549125c24ed07abfb4 is 50, key is test_row_0/A:col10/1732101562580/Put/seqid=0 2024-11-20T11:19:22,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741905_1081 (size=12301) 2024-11-20T11:19:22,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:22,898 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:22,916 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:22,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101622911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:22,917 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:22,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101622912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:22,917 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:22,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101622914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:22,919 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:22,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101622915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:22,919 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:22,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101622916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:23,018 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:23,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101623017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:23,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:23,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101623018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:23,022 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:23,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101623018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:23,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:23,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101623021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:23,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:23,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101623021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:23,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T11:19:23,221 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:23,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101623220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:23,224 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:23,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101623224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:23,225 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:23,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101623224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:23,226 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:23,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101623225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:23,226 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:23,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101623225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:23,299 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/b2dd27461dc843549125c24ed07abfb4 2024-11-20T11:19:23,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/f2d8e51821f647d0a1d66d9ac292617f is 50, key is test_row_0/B:col10/1732101562580/Put/seqid=0 2024-11-20T11:19:23,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741906_1082 (size=12301) 2024-11-20T11:19:23,323 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/f2d8e51821f647d0a1d66d9ac292617f 2024-11-20T11:19:23,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/1a9e8c95ee9544f7a70324f3a2371103 is 50, key is 
test_row_0/C:col10/1732101562580/Put/seqid=0 2024-11-20T11:19:23,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T11:19:23,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741907_1083 (size=12301) 2024-11-20T11:19:23,526 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:23,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101623523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:23,527 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:23,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101623526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:23,527 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:23,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101623527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:23,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:23,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101623528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:23,532 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:23,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101623529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:23,740 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/1a9e8c95ee9544f7a70324f3a2371103 2024-11-20T11:19:23,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/b2dd27461dc843549125c24ed07abfb4 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/b2dd27461dc843549125c24ed07abfb4 2024-11-20T11:19:23,757 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/b2dd27461dc843549125c24ed07abfb4, entries=150, sequenceid=332, filesize=12.0 K 2024-11-20T11:19:23,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/f2d8e51821f647d0a1d66d9ac292617f as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/f2d8e51821f647d0a1d66d9ac292617f 2024-11-20T11:19:23,764 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/f2d8e51821f647d0a1d66d9ac292617f, entries=150, sequenceid=332, filesize=12.0 K 2024-11-20T11:19:23,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/1a9e8c95ee9544f7a70324f3a2371103 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/1a9e8c95ee9544f7a70324f3a2371103 2024-11-20T11:19:23,771 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/1a9e8c95ee9544f7a70324f3a2371103, entries=150, sequenceid=332, filesize=12.0 K 2024-11-20T11:19:23,772 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 96d866d8db5bf8a73bb64ed0351e8f75 in 886ms, sequenceid=332, compaction requested=true 2024-11-20T11:19:23,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:23,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:23,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-11-20T11:19:23,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-11-20T11:19:23,776 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-11-20T11:19:23,776 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0410 sec 2024-11-20T11:19:23,778 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 1.0470 sec 2024-11-20T11:19:23,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T11:19:23,835 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-11-20T11:19:23,837 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:19:23,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-11-20T11:19:23,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T11:19:23,840 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:19:23,840 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:19:23,841 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:19:23,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T11:19:23,993 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:23,993 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-20T11:19:23,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:23,994 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing 96d866d8db5bf8a73bb64ed0351e8f75 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-20T11:19:23,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=A 2024-11-20T11:19:23,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:23,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=B 2024-11-20T11:19:23,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:23,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=C 2024-11-20T11:19:23,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:24,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/0df1b55b85304e98adde93a65c3bc0ae is 50, key is test_row_0/A:col10/1732101562914/Put/seqid=0 2024-11-20T11:19:24,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741908_1084 (size=12301) 
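[Editor's note, not part of the captured log: the repeated RegionTooBusyException ("Over memstore limit=512.0 K") entries above are thrown by HRegion.checkResources while the region's memstore is above its blocking threshold and a flush is still in flight; the client-side CallRunner lines show the same exception being returned to the writers on port 35185. As a minimal sketch of how a writer can ride out these transient rejections, the Java fragment below widens the standard HBase client retry settings before issuing a Put against the same table/row/family layout seen in this log. The retry count and pause values are illustrative assumptions, not values taken from this test run.]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstorePressureWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative values (assumptions): give the client more retries and a short
    // pause so transient RegionTooBusyException responses are absorbed while the
    // region server flushes its memstore.
    conf.setInt("hbase.client.retries.number", 15);
    conf.setLong("hbase.client.pause", 100);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row, family, and qualifier mirror the keys visible in the log
      // (test_row_0, families A/B/C, qualifier col10); the value is arbitrary.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // RegionTooBusyException is retriable, so table.put() retries internally;
      // the exception only propagates to the caller once retries are exhausted.
      table.put(put);
    }
  }
}

[End of editor's note; the captured log continues below.]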
2024-11-20T11:19:24,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:24,032 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:24,054 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:24,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101624049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:24,054 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:24,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101624051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:24,055 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:24,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101624052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:24,055 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:24,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101624052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:24,058 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:24,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101624054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:24,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T11:19:24,159 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:24,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101624156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:24,159 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:24,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101624157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:24,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:24,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101624157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:24,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:24,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101624157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:24,161 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:24,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101624160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:24,363 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:24,364 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:24,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101624362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:24,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101624363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:24,364 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:24,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101624363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:24,365 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:24,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101624363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:24,366 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:24,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101624364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:24,416 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/0df1b55b85304e98adde93a65c3bc0ae 2024-11-20T11:19:24,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/907a04771e0f4ae1baf53635b6a79454 is 50, key is test_row_0/B:col10/1732101562914/Put/seqid=0 2024-11-20T11:19:24,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741909_1085 (size=12301) 2024-11-20T11:19:24,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T11:19:24,444 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/907a04771e0f4ae1baf53635b6a79454 2024-11-20T11:19:24,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/ad56fee419144ffdb0da9584ae227b1e is 50, key is test_row_0/C:col10/1732101562914/Put/seqid=0 2024-11-20T11:19:24,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741910_1086 (size=12301) 2024-11-20T11:19:24,477 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=351 (bloomFilter=true), 
to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/ad56fee419144ffdb0da9584ae227b1e 2024-11-20T11:19:24,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/0df1b55b85304e98adde93a65c3bc0ae as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/0df1b55b85304e98adde93a65c3bc0ae 2024-11-20T11:19:24,495 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/0df1b55b85304e98adde93a65c3bc0ae, entries=150, sequenceid=351, filesize=12.0 K 2024-11-20T11:19:24,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/907a04771e0f4ae1baf53635b6a79454 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/907a04771e0f4ae1baf53635b6a79454 2024-11-20T11:19:24,503 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/907a04771e0f4ae1baf53635b6a79454, entries=150, sequenceid=351, filesize=12.0 K 2024-11-20T11:19:24,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/ad56fee419144ffdb0da9584ae227b1e as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/ad56fee419144ffdb0da9584ae227b1e 2024-11-20T11:19:24,512 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/ad56fee419144ffdb0da9584ae227b1e, entries=150, sequenceid=351, filesize=12.0 K 2024-11-20T11:19:24,514 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=107.34 KB/109920 for 96d866d8db5bf8a73bb64ed0351e8f75 in 520ms, sequenceid=351, compaction requested=true 2024-11-20T11:19:24,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for 
96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:24,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:24,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-11-20T11:19:24,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-11-20T11:19:24,517 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-11-20T11:19:24,517 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 675 msec 2024-11-20T11:19:24,519 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 681 msec 2024-11-20T11:19:24,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:24,669 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96d866d8db5bf8a73bb64ed0351e8f75 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-20T11:19:24,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=A 2024-11-20T11:19:24,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:24,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=B 2024-11-20T11:19:24,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:24,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=C 2024-11-20T11:19:24,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:24,676 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/50527ba70541483d9bebeb50d7902814 is 50, key is test_row_0/A:col10/1732101564052/Put/seqid=0 2024-11-20T11:19:24,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741911_1087 (size=17181) 2024-11-20T11:19:24,687 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/50527ba70541483d9bebeb50d7902814 2024-11-20T11:19:24,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:24,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101624688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:24,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:24,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101624693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:24,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:24,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101624694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:24,698 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/e3dd528907e4474aab72dff1ea687fdd is 50, key is test_row_0/B:col10/1732101564052/Put/seqid=0 2024-11-20T11:19:24,699 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:24,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101624695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:24,700 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:24,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101624696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:24,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741912_1088 (size=12301) 2024-11-20T11:19:24,809 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:24,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101624808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:24,809 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:24,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101624808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:24,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:24,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101624808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:24,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:24,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101624808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:24,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:24,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101624808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:24,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T11:19:24,943 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-11-20T11:19:24,944 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:19:24,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-11-20T11:19:24,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T11:19:24,949 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:19:24,950 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:19:24,950 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:19:25,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:25,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101625011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:25,012 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:25,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101625011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:25,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:25,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101625012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:25,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:25,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101625013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:25,015 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:25,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101625013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:25,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T11:19:25,101 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:25,102 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T11:19:25,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:25,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:25,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:25,102 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:25,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:25,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:25,105 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/e3dd528907e4474aab72dff1ea687fdd 2024-11-20T11:19:25,115 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/3a9f22f770eb46c3973dc3fda0eab7b7 is 50, key is test_row_0/C:col10/1732101564052/Put/seqid=0 2024-11-20T11:19:25,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741913_1089 (size=12301) 2024-11-20T11:19:25,121 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/3a9f22f770eb46c3973dc3fda0eab7b7 2024-11-20T11:19:25,128 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/50527ba70541483d9bebeb50d7902814 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/50527ba70541483d9bebeb50d7902814 2024-11-20T11:19:25,134 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/50527ba70541483d9bebeb50d7902814, entries=250, sequenceid=372, filesize=16.8 K 2024-11-20T11:19:25,136 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/e3dd528907e4474aab72dff1ea687fdd as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/e3dd528907e4474aab72dff1ea687fdd 2024-11-20T11:19:25,142 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/e3dd528907e4474aab72dff1ea687fdd, entries=150, sequenceid=372, filesize=12.0 K 2024-11-20T11:19:25,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/3a9f22f770eb46c3973dc3fda0eab7b7 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/3a9f22f770eb46c3973dc3fda0eab7b7 2024-11-20T11:19:25,154 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/3a9f22f770eb46c3973dc3fda0eab7b7, entries=150, sequenceid=372, filesize=12.0 K 2024-11-20T11:19:25,155 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 96d866d8db5bf8a73bb64ed0351e8f75 in 486ms, sequenceid=372, compaction requested=true 2024-11-20T11:19:25,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:25,155 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:19:25,155 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:25,155 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:19:25,155 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-20T11:19:25,155 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:25,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:19:25,156 DEBUG 
[RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-20T11:19:25,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:19:25,164 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 74454 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-20T11:19:25,164 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 84214 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-20T11:19:25,164 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 96d866d8db5bf8a73bb64ed0351e8f75/B is initiating minor compaction (all files) 2024-11-20T11:19:25,164 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 96d866d8db5bf8a73bb64ed0351e8f75/A is initiating minor compaction (all files) 2024-11-20T11:19:25,164 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96d866d8db5bf8a73bb64ed0351e8f75/B in TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:25,164 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96d866d8db5bf8a73bb64ed0351e8f75/A in TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:25,164 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/29ffe433ee814188b39da1f7ba079908, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/67b2bee4864a42b1919fe729aedd78ee, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/50fb24cc835d469ea284f5bfbe700f9b, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/b2dd27461dc843549125c24ed07abfb4, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/0df1b55b85304e98adde93a65c3bc0ae, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/50527ba70541483d9bebeb50d7902814] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp, totalSize=82.2 K 2024-11-20T11:19:25,164 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/bf7773ba5c42487fb048d0ef74a06ec9, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/213eff694aa943e1aee2d8c148bab137, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/1b0bb4abea144f1ab6199df1e8c5824e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/f2d8e51821f647d0a1d66d9ac292617f, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/907a04771e0f4ae1baf53635b6a79454, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/e3dd528907e4474aab72dff1ea687fdd] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp, totalSize=72.7 K 2024-11-20T11:19:25,165 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29ffe433ee814188b39da1f7ba079908, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732101561091 2024-11-20T11:19:25,166 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting bf7773ba5c42487fb048d0ef74a06ec9, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732101561091 2024-11-20T11:19:25,166 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 67b2bee4864a42b1919fe729aedd78ee, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732101561732 2024-11-20T11:19:25,167 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 213eff694aa943e1aee2d8c148bab137, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732101561732 2024-11-20T11:19:25,167 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 50fb24cc835d469ea284f5bfbe700f9b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732101561911 2024-11-20T11:19:25,168 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b0bb4abea144f1ab6199df1e8c5824e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732101561911 2024-11-20T11:19:25,169 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting b2dd27461dc843549125c24ed07abfb4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732101562580 2024-11-20T11:19:25,169 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting f2d8e51821f647d0a1d66d9ac292617f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732101562580 2024-11-20T11:19:25,170 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0df1b55b85304e98adde93a65c3bc0ae, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1732101562896 2024-11-20T11:19:25,170 DEBUG 
[RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 907a04771e0f4ae1baf53635b6a79454, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1732101562896 2024-11-20T11:19:25,171 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 50527ba70541483d9bebeb50d7902814, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732101564042 2024-11-20T11:19:25,171 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting e3dd528907e4474aab72dff1ea687fdd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732101564042 2024-11-20T11:19:25,193 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96d866d8db5bf8a73bb64ed0351e8f75#A#compaction#75 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:25,193 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/4e5bfbf48c4a4eab80e5b3877f03d426 is 50, key is test_row_0/A:col10/1732101564052/Put/seqid=0 2024-11-20T11:19:25,194 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96d866d8db5bf8a73bb64ed0351e8f75#B#compaction#76 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:25,195 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/9fd93d9511a3473dadf978c620263dcb is 50, key is test_row_0/B:col10/1732101564052/Put/seqid=0 2024-11-20T11:19:25,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741914_1090 (size=13153) 2024-11-20T11:19:25,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741915_1091 (size=13153) 2024-11-20T11:19:25,212 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/4e5bfbf48c4a4eab80e5b3877f03d426 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/4e5bfbf48c4a4eab80e5b3877f03d426 2024-11-20T11:19:25,219 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/9fd93d9511a3473dadf978c620263dcb as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/9fd93d9511a3473dadf978c620263dcb 2024-11-20T11:19:25,220 INFO 
[RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 96d866d8db5bf8a73bb64ed0351e8f75/A of 96d866d8db5bf8a73bb64ed0351e8f75 into 4e5bfbf48c4a4eab80e5b3877f03d426(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:19:25,220 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:25,220 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75., storeName=96d866d8db5bf8a73bb64ed0351e8f75/A, priority=10, startTime=1732101565155; duration=0sec 2024-11-20T11:19:25,220 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:19:25,220 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:A 2024-11-20T11:19:25,221 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-20T11:19:25,224 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 74454 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-20T11:19:25,224 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 96d866d8db5bf8a73bb64ed0351e8f75/C is initiating minor compaction (all files) 2024-11-20T11:19:25,224 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96d866d8db5bf8a73bb64ed0351e8f75/C in TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
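The ExploringCompactionPolicy lines above report that a candidate set of 6 files was kept after a size-ratio check: a set is only accepted when no single file is much larger than the rest of the set combined. A minimal Java sketch of that ratio test follows; the class and method names, the 1.2 ratio, and the byte values are illustrative assumptions, not values read from this run (the per-file sizes are approximated from the 12.6 K / 12.0 K figures logged for store B).

    public class CompactionRatioSketch {
        // Simplified stand-in for the size-ratio check an exploring-style
        // compaction policy applies: every candidate file must be no larger
        // than (sum of the other candidates) * ratio.
        static boolean filesInRatio(long[] fileSizes, double ratio) {
            long total = 0;
            for (long size : fileSizes) {
                total += size;
            }
            for (long size : fileSizes) {
                if (size > (total - size) * ratio) {
                    return false; // one file dominates; reject this candidate set
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Approximate sizes of the six B-store files selected above:
            // one ~12.6 K file and five ~12.0 K files.
            long[] storeB = {12902, 12288, 12288, 12288, 12288, 12288};
            System.out.println(filesInRatio(storeB, 1.2)); // true: all files are of comparable size
        }
    }

Because the six files are all of comparable size, the whole set passes the test and each store is compacted with "all files", as the HStore(1540) "minor compaction (all files)" lines report.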
2024-11-20T11:19:25,224 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/1d11a2a86b474be8ada819459efbb6a0, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/9f6bf422607c4ec88f6cdbed2b376c01, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/4626c1292a1b410484a7b1d74754688e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/1a9e8c95ee9544f7a70324f3a2371103, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/ad56fee419144ffdb0da9584ae227b1e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/3a9f22f770eb46c3973dc3fda0eab7b7] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp, totalSize=72.7 K 2024-11-20T11:19:25,225 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1d11a2a86b474be8ada819459efbb6a0, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732101561091 2024-11-20T11:19:25,225 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9f6bf422607c4ec88f6cdbed2b376c01, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732101561732 2024-11-20T11:19:25,226 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4626c1292a1b410484a7b1d74754688e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732101561911 2024-11-20T11:19:25,226 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1a9e8c95ee9544f7a70324f3a2371103, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732101562580 2024-11-20T11:19:25,227 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 96d866d8db5bf8a73bb64ed0351e8f75/B of 96d866d8db5bf8a73bb64ed0351e8f75 into 9fd93d9511a3473dadf978c620263dcb(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
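The byte totals in the "Exploring compaction algorithm has selected" lines and the kibibyte totals in the "Starting compaction" lines are the same quantities in different units; a quick cross-check of the figures already present in the log:

    74454 bytes / 1024 ≈ 72.7 K   (the B and C selections)
    84214 bytes / 1024 ≈ 82.2 K   (the A selection)

Only the unit conversion is added here; both sets of numbers appear verbatim above.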
2024-11-20T11:19:25,227 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:25,227 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75., storeName=96d866d8db5bf8a73bb64ed0351e8f75/B, priority=10, startTime=1732101565155; duration=0sec 2024-11-20T11:19:25,227 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting ad56fee419144ffdb0da9584ae227b1e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1732101562896 2024-11-20T11:19:25,227 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:25,227 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:B 2024-11-20T11:19:25,228 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3a9f22f770eb46c3973dc3fda0eab7b7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732101564042 2024-11-20T11:19:25,246 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96d866d8db5bf8a73bb64ed0351e8f75#C#compaction#77 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:25,248 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/4a8b3a47840d40639a10d416758956ae is 50, key is test_row_0/C:col10/1732101564052/Put/seqid=0 2024-11-20T11:19:25,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T11:19:25,254 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:25,255 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T11:19:25,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
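The pid=28/pid=29 entries are the master-driven flush path: a FlushTableProcedure fans out a FlushRegionProcedure, which the region server executes as the RS_FLUSH_REGIONS / FlushRegionCallable work seen above. A minimal sketch of requesting such a flush through the public client API, assuming the standard HBase 2.x Admin interface; the connection boilerplate is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Asks the master to flush every region of the table; on the
                // region server this surfaces as RS_FLUSH_REGIONS events like
                // the FlushRegionCallable entries above.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }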
2024-11-20T11:19:25,257 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing 96d866d8db5bf8a73bb64ed0351e8f75 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T11:19:25,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=A 2024-11-20T11:19:25,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:25,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=B 2024-11-20T11:19:25,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:25,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=C 2024-11-20T11:19:25,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:25,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741916_1092 (size=13153) 2024-11-20T11:19:25,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/0fa121b129774edaad3b79c5fb111187 is 50, key is test_row_0/A:col10/1732101564683/Put/seqid=0 2024-11-20T11:19:25,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741917_1093 (size=12301) 2024-11-20T11:19:25,315 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:25,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:25,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:25,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101625328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:25,333 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:25,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101625329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:25,334 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:25,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101625330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:25,334 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:25,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101625331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:25,334 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:25,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101625332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:25,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:25,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101625434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:25,436 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:25,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101625434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:25,436 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:25,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101625435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:25,436 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:25,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101625435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:25,439 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:25,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101625438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:25,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T11:19:25,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:25,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101625638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:25,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:25,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101625638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:25,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:25,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101625638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:25,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:25,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101625640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:25,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:25,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101625640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:25,667 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/4a8b3a47840d40639a10d416758956ae as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/4a8b3a47840d40639a10d416758956ae 2024-11-20T11:19:25,673 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/0fa121b129774edaad3b79c5fb111187 2024-11-20T11:19:25,676 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 96d866d8db5bf8a73bb64ed0351e8f75/C of 96d866d8db5bf8a73bb64ed0351e8f75 into 4a8b3a47840d40639a10d416758956ae(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
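The repeated RegionTooBusyException warnings in this stretch are HRegion.checkResources refusing new writes while the region's memstore is above its blocking size (512.0 K here); the clients back off and retry, which is why the same connections reappear with new callIds a few hundred milliseconds apart. The blocking size is conventionally the per-region flush size multiplied by a block multiplier. A sketch of shrinking both in a test configuration follows; the property names are standard HBase settings, but the values are illustrative and not read from this run's configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Flush a region's memstore once it reaches 128 KiB ...
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
            // ... and reject writes (RegionTooBusyException) once it grows past
            // flush.size * multiplier, i.e. 4 * 128 KiB = 512 KiB with these values.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        }
    }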
2024-11-20T11:19:25,676 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:25,676 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75., storeName=96d866d8db5bf8a73bb64ed0351e8f75/C, priority=10, startTime=1732101565156; duration=0sec 2024-11-20T11:19:25,676 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:25,676 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:C 2024-11-20T11:19:25,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/2988d3a970e4446a90bba3f56dc15257 is 50, key is test_row_0/B:col10/1732101564683/Put/seqid=0 2024-11-20T11:19:25,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741918_1094 (size=12301) 2024-11-20T11:19:25,699 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/2988d3a970e4446a90bba3f56dc15257 2024-11-20T11:19:25,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/0d41bf0fc87546c39e7e5dd8a0fa866c is 50, key is test_row_0/C:col10/1732101564683/Put/seqid=0 2024-11-20T11:19:25,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741919_1095 (size=12301) 2024-11-20T11:19:25,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:25,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101625942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:25,945 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:25,945 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:25,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101625944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:25,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101625944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:25,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:25,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101625944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:25,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:25,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101625945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:26,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T11:19:26,119 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/0d41bf0fc87546c39e7e5dd8a0fa866c 2024-11-20T11:19:26,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/0fa121b129774edaad3b79c5fb111187 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/0fa121b129774edaad3b79c5fb111187 2024-11-20T11:19:26,133 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/0fa121b129774edaad3b79c5fb111187, entries=150, sequenceid=390, filesize=12.0 K 2024-11-20T11:19:26,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/2988d3a970e4446a90bba3f56dc15257 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/2988d3a970e4446a90bba3f56dc15257 2024-11-20T11:19:26,140 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/2988d3a970e4446a90bba3f56dc15257, entries=150, sequenceid=390, filesize=12.0 K 2024-11-20T11:19:26,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/0d41bf0fc87546c39e7e5dd8a0fa866c as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/0d41bf0fc87546c39e7e5dd8a0fa866c 2024-11-20T11:19:26,150 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/0d41bf0fc87546c39e7e5dd8a0fa866c, entries=150, sequenceid=390, filesize=12.0 K 2024-11-20T11:19:26,151 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 96d866d8db5bf8a73bb64ed0351e8f75 in 896ms, sequenceid=390, compaction requested=false 2024-11-20T11:19:26,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:26,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
2024-11-20T11:19:26,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-20T11:19:26,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-11-20T11:19:26,155 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-11-20T11:19:26,155 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2030 sec 2024-11-20T11:19:26,157 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 1.2110 sec 2024-11-20T11:19:26,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:26,450 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96d866d8db5bf8a73bb64ed0351e8f75 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T11:19:26,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=A 2024-11-20T11:19:26,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:26,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=B 2024-11-20T11:19:26,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:26,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=C 2024-11-20T11:19:26,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:26,461 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/4d3bb0b327004175aca3bc0c768ac6fc is 50, key is test_row_0/A:col10/1732101566448/Put/seqid=0 2024-11-20T11:19:26,466 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:26,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101626463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:26,466 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:26,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101626463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:26,467 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:26,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101626464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:26,470 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:26,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101626465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:26,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:26,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101626470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:26,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741920_1096 (size=14741) 2024-11-20T11:19:26,481 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=414 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/4d3bb0b327004175aca3bc0c768ac6fc 2024-11-20T11:19:26,496 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/70f5e46db9fa45e495f9d39ba1ebcca0 is 50, key is test_row_0/B:col10/1732101566448/Put/seqid=0 2024-11-20T11:19:26,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741921_1097 (size=12301) 2024-11-20T11:19:26,507 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=414 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/70f5e46db9fa45e495f9d39ba1ebcca0 2024-11-20T11:19:26,518 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/ffd19f0c32124e7699754a81dff57619 is 50, key is test_row_0/C:col10/1732101566448/Put/seqid=0 2024-11-20T11:19:26,541 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741922_1098 (size=12301) 2024-11-20T11:19:26,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:26,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:26,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101626567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:26,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101626568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:26,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:26,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101626569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:26,573 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:26,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101626572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:26,574 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:26,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101626574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:26,770 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:26,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101626770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:26,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:26,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101626771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:26,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:26,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101626772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:26,775 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:26,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101626774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:26,775 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:26,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101626775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:26,942 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=414 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/ffd19f0c32124e7699754a81dff57619 2024-11-20T11:19:26,949 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/4d3bb0b327004175aca3bc0c768ac6fc as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/4d3bb0b327004175aca3bc0c768ac6fc 2024-11-20T11:19:26,956 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/4d3bb0b327004175aca3bc0c768ac6fc, entries=200, sequenceid=414, filesize=14.4 K 2024-11-20T11:19:26,957 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/70f5e46db9fa45e495f9d39ba1ebcca0 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/70f5e46db9fa45e495f9d39ba1ebcca0 2024-11-20T11:19:26,963 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/70f5e46db9fa45e495f9d39ba1ebcca0, entries=150, sequenceid=414, filesize=12.0 K 2024-11-20T11:19:26,963 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-11-20T11:19:26,964 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/ffd19f0c32124e7699754a81dff57619 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/ffd19f0c32124e7699754a81dff57619 2024-11-20T11:19:26,970 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/ffd19f0c32124e7699754a81dff57619, entries=150, sequenceid=414, filesize=12.0 K 2024-11-20T11:19:26,971 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 96d866d8db5bf8a73bb64ed0351e8f75 in 522ms, sequenceid=414, compaction requested=true 2024-11-20T11:19:26,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:26,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:19:26,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:26,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:19:26,971 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:19:26,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:26,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:19:26,972 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:19:26,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:19:26,973 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:26,973 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40195 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:26,973 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] 
regionserver.HStore(1540): 96d866d8db5bf8a73bb64ed0351e8f75/B is initiating minor compaction (all files) 2024-11-20T11:19:26,973 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 96d866d8db5bf8a73bb64ed0351e8f75/A is initiating minor compaction (all files) 2024-11-20T11:19:26,973 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96d866d8db5bf8a73bb64ed0351e8f75/B in TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:26,973 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96d866d8db5bf8a73bb64ed0351e8f75/A in TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:26,973 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/9fd93d9511a3473dadf978c620263dcb, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/2988d3a970e4446a90bba3f56dc15257, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/70f5e46db9fa45e495f9d39ba1ebcca0] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp, totalSize=36.9 K 2024-11-20T11:19:26,973 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/4e5bfbf48c4a4eab80e5b3877f03d426, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/0fa121b129774edaad3b79c5fb111187, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/4d3bb0b327004175aca3bc0c768ac6fc] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp, totalSize=39.3 K 2024-11-20T11:19:26,974 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 9fd93d9511a3473dadf978c620263dcb, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732101564042 2024-11-20T11:19:26,974 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4e5bfbf48c4a4eab80e5b3877f03d426, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732101564042 2024-11-20T11:19:26,974 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0fa121b129774edaad3b79c5fb111187, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732101564683 2024-11-20T11:19:26,975 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 2988d3a970e4446a90bba3f56dc15257, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732101564683 
2024-11-20T11:19:26,975 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4d3bb0b327004175aca3bc0c768ac6fc, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1732101565329 2024-11-20T11:19:26,975 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 70f5e46db9fa45e495f9d39ba1ebcca0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1732101566448 2024-11-20T11:19:26,986 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96d866d8db5bf8a73bb64ed0351e8f75#B#compaction#84 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:26,986 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/e669ee7e73724073a18bca5826da7a32 is 50, key is test_row_0/B:col10/1732101566448/Put/seqid=0 2024-11-20T11:19:26,990 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96d866d8db5bf8a73bb64ed0351e8f75#A#compaction#85 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:26,992 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/91ba36568bf34ea0ad84832595f2daf9 is 50, key is test_row_0/A:col10/1732101566448/Put/seqid=0 2024-11-20T11:19:26,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741923_1099 (size=13255) 2024-11-20T11:19:26,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741924_1100 (size=13255) 2024-11-20T11:19:27,012 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/e669ee7e73724073a18bca5826da7a32 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/e669ee7e73724073a18bca5826da7a32 2024-11-20T11:19:27,021 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96d866d8db5bf8a73bb64ed0351e8f75/B of 96d866d8db5bf8a73bb64ed0351e8f75 into e669ee7e73724073a18bca5826da7a32(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:19:27,021 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:27,021 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75., storeName=96d866d8db5bf8a73bb64ed0351e8f75/B, priority=13, startTime=1732101566971; duration=0sec 2024-11-20T11:19:27,021 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:19:27,021 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:B 2024-11-20T11:19:27,021 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:19:27,023 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:27,023 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 96d866d8db5bf8a73bb64ed0351e8f75/C is initiating minor compaction (all files) 2024-11-20T11:19:27,024 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96d866d8db5bf8a73bb64ed0351e8f75/C in TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:27,024 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/4a8b3a47840d40639a10d416758956ae, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/0d41bf0fc87546c39e7e5dd8a0fa866c, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/ffd19f0c32124e7699754a81dff57619] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp, totalSize=36.9 K 2024-11-20T11:19:27,024 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 4a8b3a47840d40639a10d416758956ae, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732101564042 2024-11-20T11:19:27,026 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d41bf0fc87546c39e7e5dd8a0fa866c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732101564683 2024-11-20T11:19:27,026 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting ffd19f0c32124e7699754a81dff57619, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1732101566448 2024-11-20T11:19:27,036 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
96d866d8db5bf8a73bb64ed0351e8f75#C#compaction#86 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:27,036 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/a0eab9c02289416ba882354aa9ffb8d1 is 50, key is test_row_0/C:col10/1732101566448/Put/seqid=0 2024-11-20T11:19:27,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741925_1101 (size=13255) 2024-11-20T11:19:27,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T11:19:27,051 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-11-20T11:19:27,053 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:19:27,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-11-20T11:19:27,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T11:19:27,054 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:19:27,055 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:19:27,055 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:19:27,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:27,075 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96d866d8db5bf8a73bb64ed0351e8f75 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T11:19:27,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=A 2024-11-20T11:19:27,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:27,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=B 2024-11-20T11:19:27,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:27,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=C 2024-11-20T11:19:27,075 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:27,082 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/c1cf41e01f844a0688fb33d8c81e06e8 is 50, key is test_row_0/A:col10/1732101566463/Put/seqid=0 2024-11-20T11:19:27,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741926_1102 (size=14741) 2024-11-20T11:19:27,088 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=429 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/c1cf41e01f844a0688fb33d8c81e06e8 2024-11-20T11:19:27,098 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:27,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101627095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:27,099 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:27,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101627096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:27,099 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:27,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101627097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:27,103 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:27,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101627098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:27,103 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:27,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101627098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:27,105 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/44081c28a26544c19a457c587c9b24ce is 50, key is test_row_0/B:col10/1732101566463/Put/seqid=0 2024-11-20T11:19:27,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741927_1103 (size=12301) 2024-11-20T11:19:27,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T11:19:27,200 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:27,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101627200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:27,201 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:27,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101627200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:27,201 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:27,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101627200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:27,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:27,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101627204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:27,205 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:27,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101627205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:27,207 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:27,208 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T11:19:27,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:27,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:27,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:27,208 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:27,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:27,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:27,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T11:19:27,361 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:27,362 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T11:19:27,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:27,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:27,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:27,362 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:27,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:27,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:27,403 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:27,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101627403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:27,404 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:27,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101627403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:27,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:27,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101627405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:27,406 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/91ba36568bf34ea0ad84832595f2daf9 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/91ba36568bf34ea0ad84832595f2daf9 2024-11-20T11:19:27,407 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:27,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101627406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:27,409 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:27,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101627408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:27,415 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96d866d8db5bf8a73bb64ed0351e8f75/A of 96d866d8db5bf8a73bb64ed0351e8f75 into 91ba36568bf34ea0ad84832595f2daf9(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
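The repeated RegionTooBusyException entries above ("Over memstore limit=512.0 K") are the region server applying write back-pressure: HRegion.checkResources rejects mutations while the memstore is over its blocking limit and a flush is in progress. Below is a minimal, hypothetical client-side sketch of absorbing that back-pressure with a bounded retry/backoff loop. The table name and row/column names are taken from this log; the retry count and backoff values are assumptions, and whether the raw exception ever reaches application code depends on the client's own retry settings (it may instead surface wrapped after client-side retries are exhausted).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 50;                     // assumed starting backoff
          for (int attempt = 1; attempt <= 10; attempt++) {  // assumed attempt budget
            try {
              table.put(put);                      // rejected while the memstore is over its blocking limit
              return;                              // write accepted
            } catch (RegionTooBusyException busy) {
              // Region is shedding load while it flushes; wait and try again.
              Thread.sleep(backoffMs);
              backoffMs = Math.min(backoffMs * 2, 2000);
            }
          }
          throw new RuntimeException("region stayed too busy; giving up");
        }
      }
    }

Exponential backoff is the usual choice here: hammering a region that is already over its memstore limit only prolongs the condition the flush is trying to clear.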
2024-11-20T11:19:27,415 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:27,415 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75., storeName=96d866d8db5bf8a73bb64ed0351e8f75/A, priority=13, startTime=1732101566971; duration=0sec 2024-11-20T11:19:27,415 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:27,415 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:A 2024-11-20T11:19:27,451 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/a0eab9c02289416ba882354aa9ffb8d1 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/a0eab9c02289416ba882354aa9ffb8d1 2024-11-20T11:19:27,458 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 96d866d8db5bf8a73bb64ed0351e8f75/C of 96d866d8db5bf8a73bb64ed0351e8f75 into a0eab9c02289416ba882354aa9ffb8d1(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:19:27,458 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:27,458 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75., storeName=96d866d8db5bf8a73bb64ed0351e8f75/C, priority=13, startTime=1732101566972; duration=0sec 2024-11-20T11:19:27,458 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:27,458 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:C 2024-11-20T11:19:27,515 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:27,515 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T11:19:27,515 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=429 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/44081c28a26544c19a457c587c9b24ce 2024-11-20T11:19:27,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting 
region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:27,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:27,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:27,516 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:27,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
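The cluster above shows the per-region sub-procedure (pid=31) of the FlushTableProcedure failing with "Unable to complete flush ... as already flushing": the memstore-pressure flush started at 11:19:27,075 is still running, so the FlushRegionCallable bails out and the master re-dispatches it until it can succeed. For reference, a flush like the one the test harness requested (procedure pid=30) can be issued through the Admin API; this is a hedged sketch, not the test's own code, and only the table name is taken from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Requests a flush of every region of the table. On recent HBase versions the master
          // drives this through a FlushTableProcedure (like pid=30 above); if a region is already
          // flushing, its sub-procedure is simply retried, which produces the repeated
          // "Unable to complete flush" / "Remote procedure failed" entries seen in this log.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

The repeated failures are therefore benign retries rather than data-loss events; the procedure completes once the in-flight flush finishes.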
2024-11-20T11:19:27,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
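The "Over memstore limit=512.0 K" figure in the rejections above is the per-region blocking threshold, i.e. hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The excerpt does not show the values this test actually configured, so the numbers below are assumptions chosen only to reproduce a 512 KB limit for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallMemstoreLimitConfig {
      public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // Assumed example values: a 128 KB flush size with a block multiplier of 4
        // yields the 512 KB blocking limit reported by RegionTooBusyException above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
      }
    }

Once a region's memstore crosses flush.size times the multiplier, checkResources (visible at the top of every stack trace above) rejects further mutations until the flush brings usage back under the limit, which is exactly the rejection-then-flush pattern recorded in this section of the log.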
2024-11-20T11:19:27,527 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/f9c7ffd28e7142f58c7bffe11b3af935 is 50, key is test_row_0/C:col10/1732101566463/Put/seqid=0 2024-11-20T11:19:27,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741928_1104 (size=12301) 2024-11-20T11:19:27,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T11:19:27,669 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:27,670 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T11:19:27,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:27,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:27,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:27,670 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:27,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:27,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:27,705 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:27,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101627705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:27,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:27,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101627706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:27,709 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:27,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101627708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:27,709 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:27,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101627708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:27,711 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:27,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101627711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:27,823 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:27,823 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T11:19:27,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:27,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:27,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:27,824 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
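
The pattern above repeats for most of this section: the region's memstore is past its 512.0 K blocking limit, so every Mutate is rejected with RegionTooBusyException while the requested flush (pid=31) keeps aborting with "Unable to complete flush" because a flush is already in progress. On the client side this normally surfaces as retried puts rather than hard failures; below is a minimal sketch of the client-side settings involved, assuming a stock HBase 2.x Java client and an illustrative table, row, and class name (none of this is taken from the test source).

// Minimal sketch (illustrative, not part of the test above): the client retries
// RegionTooBusyException internally; these knobs bound how long a single put()
// keeps retrying while the region is blocked waiting for its flush to finish.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {                      // hypothetical class name
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 10);       // cap retries for this illustration
    conf.setInt("hbase.client.pause", 100);               // ms between retries
    conf.setInt("hbase.client.operation.timeout", 60_000); // overall deadline per operation
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Column family "A" and qualifier "col10" mirror the keys visible in the log;
      // the row and value are made up for the example.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      table.put(put); // retried internally until the deadline if the region is too busy
    }
  }
}
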
2024-11-20T11:19:27,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:27,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T11:19:27,935 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=429 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/f9c7ffd28e7142f58c7bffe11b3af935
2024-11-20T11:19:27,941 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/c1cf41e01f844a0688fb33d8c81e06e8 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/c1cf41e01f844a0688fb33d8c81e06e8
2024-11-20T11:19:27,947 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/c1cf41e01f844a0688fb33d8c81e06e8, entries=200, sequenceid=429, filesize=14.4 K
2024-11-20T11:19:27,948 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/44081c28a26544c19a457c587c9b24ce as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/44081c28a26544c19a457c587c9b24ce
2024-11-20T11:19:27,953 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/44081c28a26544c19a457c587c9b24ce, entries=150, sequenceid=429, filesize=12.0 K
2024-11-20T11:19:27,954 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/f9c7ffd28e7142f58c7bffe11b3af935 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/f9c7ffd28e7142f58c7bffe11b3af935
2024-11-20T11:19:27,959 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/f9c7ffd28e7142f58c7bffe11b3af935, entries=150, sequenceid=429, filesize=12.0 K
2024-11-20T11:19:27,961 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 96d866d8db5bf8a73bb64ed0351e8f75 in 885ms, sequenceid=429, compaction requested=false
2024-11-20T11:19:27,961 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96d866d8db5bf8a73bb64ed0351e8f75:
2024-11-20T11:19:27,976 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666
2024-11-20T11:19:27,977 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31
2024-11-20T11:19:27,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.
2024-11-20T11:19:27,977 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2837): Flushing 96d866d8db5bf8a73bb64ed0351e8f75 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB
2024-11-20T11:19:27,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=A
2024-11-20T11:19:27,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T11:19:27,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=B
2024-11-20T11:19:27,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T11:19:27,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=C
2024-11-20T11:19:27,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T11:19:27,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/fcfdac169f8f414486ffb9d825b6df11 is 50, key is test_row_0/A:col10/1732101567096/Put/seqid=0
2024-11-20T11:19:27,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741929_1105 (size=12301)
2024-11-20T11:19:28,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30
2024-11-20T11:19:28,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 96d866d8db5bf8a73bb64ed0351e8f75
2024-11-20T11:19:28,215 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing
2024-11-20T11:19:28,223 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:28,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101628221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:28,224 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:28,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101628222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:28,225 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:28,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101628222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:28,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:28,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101628224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:28,228 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:28,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101628224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:28,326 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:28,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101628325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:28,326 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:28,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101628325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:28,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:28,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101628326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:28,329 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:28,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101628329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:28,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:28,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101628329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:28,399 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=454 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/fcfdac169f8f414486ffb9d825b6df11 2024-11-20T11:19:28,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/319bb4a76a604990935ceff0b3fc4ad2 is 50, key is test_row_0/B:col10/1732101567096/Put/seqid=0 2024-11-20T11:19:28,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741930_1106 (size=12301) 2024-11-20T11:19:28,530 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:28,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:28,530 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:28,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101628528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:28,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101628528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:28,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101628527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:28,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:28,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101628530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:28,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:28,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101628534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:28,814 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=454 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/319bb4a76a604990935ceff0b3fc4ad2 2024-11-20T11:19:28,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/f6f6583e1f17412590be60dd224e77cb is 50, key is test_row_0/C:col10/1732101567096/Put/seqid=0 2024-11-20T11:19:28,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741931_1107 (size=12301) 2024-11-20T11:19:28,834 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:28,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101628833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:28,835 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:28,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101628834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:28,835 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:28,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101628834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:28,836 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:28,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101628835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:28,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:28,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101628839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:29,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T11:19:29,235 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=454 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/f6f6583e1f17412590be60dd224e77cb 2024-11-20T11:19:29,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/fcfdac169f8f414486ffb9d825b6df11 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/fcfdac169f8f414486ffb9d825b6df11 2024-11-20T11:19:29,248 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/fcfdac169f8f414486ffb9d825b6df11, entries=150, sequenceid=454, filesize=12.0 K 2024-11-20T11:19:29,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/319bb4a76a604990935ceff0b3fc4ad2 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/319bb4a76a604990935ceff0b3fc4ad2 2024-11-20T11:19:29,257 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/319bb4a76a604990935ceff0b3fc4ad2, entries=150, sequenceid=454, filesize=12.0 K 2024-11-20T11:19:29,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/f6f6583e1f17412590be60dd224e77cb as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/f6f6583e1f17412590be60dd224e77cb 2024-11-20T11:19:29,264 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/f6f6583e1f17412590be60dd224e77cb, entries=150, sequenceid=454, filesize=12.0 K 2024-11-20T11:19:29,265 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 96d866d8db5bf8a73bb64ed0351e8f75 in 1288ms, sequenceid=454, compaction requested=true 2024-11-20T11:19:29,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:29,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
2024-11-20T11:19:29,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-11-20T11:19:29,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=31 2024-11-20T11:19:29,269 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-11-20T11:19:29,269 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2120 sec 2024-11-20T11:19:29,270 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 2.2160 sec 2024-11-20T11:19:29,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:29,339 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96d866d8db5bf8a73bb64ed0351e8f75 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T11:19:29,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=A 2024-11-20T11:19:29,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:29,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=B 2024-11-20T11:19:29,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:29,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=C 2024-11-20T11:19:29,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:29,346 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/3dfd18ae1e9746cdb7d8319c2d01ecc2 is 50, key is test_row_0/A:col10/1732101568220/Put/seqid=0 2024-11-20T11:19:29,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741932_1108 (size=14741) 2024-11-20T11:19:29,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:29,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:29,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101629356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:29,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101629354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:29,376 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:29,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101629357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:29,377 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:29,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101629375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:29,378 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:29,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101629376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:29,477 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:29,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101629477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:29,478 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:29,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101629477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:29,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:29,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101629477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:29,481 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:29,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101629478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:29,485 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:29,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101629484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:29,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:29,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101629680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:29,682 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:29,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101629680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:29,682 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:29,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101629681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:29,684 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:29,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101629683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:29,687 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:29,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101629686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:29,752 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=470 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/3dfd18ae1e9746cdb7d8319c2d01ecc2 2024-11-20T11:19:29,761 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/c1c6eb21f24c427ea97b99cf562c7417 is 50, key is test_row_0/B:col10/1732101568220/Put/seqid=0 2024-11-20T11:19:29,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741933_1109 (size=12301) 2024-11-20T11:19:29,985 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:29,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101629984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:29,986 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:29,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101629985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:29,987 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:29,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101629985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:29,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:29,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101629987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:29,992 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:29,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101629990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:30,167 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=470 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/c1c6eb21f24c427ea97b99cf562c7417 2024-11-20T11:19:30,177 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/c4c1f0e6f0804c74a051560083278b2f is 50, key is test_row_0/C:col10/1732101568220/Put/seqid=0 2024-11-20T11:19:30,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741934_1110 (size=12301) 2024-11-20T11:19:30,493 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:30,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101630489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:30,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:30,494 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:30,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101630490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:30,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101630490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:30,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:30,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101630492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:30,495 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:30,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101630494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:30,583 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=470 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/c4c1f0e6f0804c74a051560083278b2f 2024-11-20T11:19:30,589 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/3dfd18ae1e9746cdb7d8319c2d01ecc2 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/3dfd18ae1e9746cdb7d8319c2d01ecc2 2024-11-20T11:19:30,594 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/3dfd18ae1e9746cdb7d8319c2d01ecc2, entries=200, sequenceid=470, filesize=14.4 K 2024-11-20T11:19:30,595 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/c1c6eb21f24c427ea97b99cf562c7417 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/c1c6eb21f24c427ea97b99cf562c7417 2024-11-20T11:19:30,600 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/c1c6eb21f24c427ea97b99cf562c7417, entries=150, sequenceid=470, filesize=12.0 K 2024-11-20T11:19:30,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/c4c1f0e6f0804c74a051560083278b2f as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/c4c1f0e6f0804c74a051560083278b2f 2024-11-20T11:19:30,606 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/c4c1f0e6f0804c74a051560083278b2f, entries=150, sequenceid=470, filesize=12.0 K 2024-11-20T11:19:30,607 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 96d866d8db5bf8a73bb64ed0351e8f75 in 1268ms, sequenceid=470, compaction requested=true 2024-11-20T11:19:30,607 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:30,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:19:30,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:30,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:19:30,608 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T11:19:30,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:30,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:19:30,608 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T11:19:30,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:19:30,609 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 55038 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T11:19:30,609 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 
files of size 50158 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T11:19:30,609 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 96d866d8db5bf8a73bb64ed0351e8f75/B is initiating minor compaction (all files) 2024-11-20T11:19:30,609 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 96d866d8db5bf8a73bb64ed0351e8f75/A is initiating minor compaction (all files) 2024-11-20T11:19:30,609 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96d866d8db5bf8a73bb64ed0351e8f75/B in TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:30,609 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96d866d8db5bf8a73bb64ed0351e8f75/A in TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:30,609 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/e669ee7e73724073a18bca5826da7a32, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/44081c28a26544c19a457c587c9b24ce, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/319bb4a76a604990935ceff0b3fc4ad2, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/c1c6eb21f24c427ea97b99cf562c7417] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp, totalSize=49.0 K 2024-11-20T11:19:30,610 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/91ba36568bf34ea0ad84832595f2daf9, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/c1cf41e01f844a0688fb33d8c81e06e8, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/fcfdac169f8f414486ffb9d825b6df11, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/3dfd18ae1e9746cdb7d8319c2d01ecc2] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp, totalSize=53.7 K 2024-11-20T11:19:30,610 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting e669ee7e73724073a18bca5826da7a32, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1732101566448 2024-11-20T11:19:30,610 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 91ba36568bf34ea0ad84832595f2daf9, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1732101566448 
2024-11-20T11:19:30,610 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 44081c28a26544c19a457c587c9b24ce, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=429, earliestPutTs=1732101566459 2024-11-20T11:19:30,610 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting c1cf41e01f844a0688fb33d8c81e06e8, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=429, earliestPutTs=1732101566454 2024-11-20T11:19:30,611 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 319bb4a76a604990935ceff0b3fc4ad2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=454, earliestPutTs=1732101567091 2024-11-20T11:19:30,611 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting fcfdac169f8f414486ffb9d825b6df11, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=454, earliestPutTs=1732101567091 2024-11-20T11:19:30,611 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3dfd18ae1e9746cdb7d8319c2d01ecc2, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=470, earliestPutTs=1732101568220 2024-11-20T11:19:30,611 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting c1c6eb21f24c427ea97b99cf562c7417, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=470, earliestPutTs=1732101568220 2024-11-20T11:19:30,623 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96d866d8db5bf8a73bb64ed0351e8f75#A#compaction#97 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:30,623 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96d866d8db5bf8a73bb64ed0351e8f75#B#compaction#96 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:30,624 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/3ab0c35de0ba4693bbc76dcc71e76ac6 is 50, key is test_row_0/A:col10/1732101568220/Put/seqid=0 2024-11-20T11:19:30,624 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/ec2756f4e1c64aada9237c184819797d is 50, key is test_row_0/B:col10/1732101568220/Put/seqid=0 2024-11-20T11:19:30,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741935_1111 (size=13391) 2024-11-20T11:19:30,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741936_1112 (size=13391) 2024-11-20T11:19:30,640 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/3ab0c35de0ba4693bbc76dcc71e76ac6 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/3ab0c35de0ba4693bbc76dcc71e76ac6 2024-11-20T11:19:30,648 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 96d866d8db5bf8a73bb64ed0351e8f75/A of 96d866d8db5bf8a73bb64ed0351e8f75 into 3ab0c35de0ba4693bbc76dcc71e76ac6(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:19:30,648 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:30,648 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75., storeName=96d866d8db5bf8a73bb64ed0351e8f75/A, priority=12, startTime=1732101570607; duration=0sec 2024-11-20T11:19:30,648 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:19:30,648 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:A 2024-11-20T11:19:30,648 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T11:19:30,650 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50158 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T11:19:30,651 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 96d866d8db5bf8a73bb64ed0351e8f75/C is initiating minor compaction (all files) 2024-11-20T11:19:30,651 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 96d866d8db5bf8a73bb64ed0351e8f75/C in TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:30,651 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/a0eab9c02289416ba882354aa9ffb8d1, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/f9c7ffd28e7142f58c7bffe11b3af935, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/f6f6583e1f17412590be60dd224e77cb, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/c4c1f0e6f0804c74a051560083278b2f] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp, totalSize=49.0 K 2024-11-20T11:19:30,652 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting a0eab9c02289416ba882354aa9ffb8d1, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1732101566448 2024-11-20T11:19:30,653 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting f9c7ffd28e7142f58c7bffe11b3af935, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=429, earliestPutTs=1732101566459 2024-11-20T11:19:30,653 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting f6f6583e1f17412590be60dd224e77cb, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=454, earliestPutTs=1732101567091 2024-11-20T11:19:30,654 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4c1f0e6f0804c74a051560083278b2f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=470, earliestPutTs=1732101568220 2024-11-20T11:19:30,664 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 96d866d8db5bf8a73bb64ed0351e8f75#C#compaction#98 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:30,665 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/600f893495e64fed89b882c223b7ed02 is 50, key is test_row_0/C:col10/1732101568220/Put/seqid=0 2024-11-20T11:19:30,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741937_1113 (size=13391) 2024-11-20T11:19:30,678 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/600f893495e64fed89b882c223b7ed02 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/600f893495e64fed89b882c223b7ed02 2024-11-20T11:19:30,686 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 96d866d8db5bf8a73bb64ed0351e8f75/C of 96d866d8db5bf8a73bb64ed0351e8f75 into 600f893495e64fed89b882c223b7ed02(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:19:30,686 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:30,686 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75., storeName=96d866d8db5bf8a73bb64ed0351e8f75/C, priority=12, startTime=1732101570608; duration=0sec 2024-11-20T11:19:30,686 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:30,687 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:C 2024-11-20T11:19:31,039 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/ec2756f4e1c64aada9237c184819797d as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/ec2756f4e1c64aada9237c184819797d 2024-11-20T11:19:31,045 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 96d866d8db5bf8a73bb64ed0351e8f75/B of 96d866d8db5bf8a73bb64ed0351e8f75 into ec2756f4e1c64aada9237c184819797d(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:19:31,045 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:31,046 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75., storeName=96d866d8db5bf8a73bb64ed0351e8f75/B, priority=12, startTime=1732101570607; duration=0sec 2024-11-20T11:19:31,046 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:31,046 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:B 2024-11-20T11:19:31,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T11:19:31,160 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-11-20T11:19:31,161 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:19:31,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees 2024-11-20T11:19:31,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=32 2024-11-20T11:19:31,163 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:19:31,164 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:19:31,164 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:19:31,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T11:19:31,316 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:31,317 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-20T11:19:31,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:31,317 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2837): Flushing 96d866d8db5bf8a73bb64ed0351e8f75 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T11:19:31,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=A 2024-11-20T11:19:31,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:31,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=B 2024-11-20T11:19:31,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:31,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=C 2024-11-20T11:19:31,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:31,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/097675611d16440a96082d251d744742 is 50, key is test_row_0/A:col10/1732101569354/Put/seqid=0 
2024-11-20T11:19:31,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741938_1114 (size=12301) 2024-11-20T11:19:31,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T11:19:31,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:31,498 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:31,507 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:31,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101631504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:31,508 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:31,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101631506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:31,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:31,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101631507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:31,510 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:31,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101631507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:31,510 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:31,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101631508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:31,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:31,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101631609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:31,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:31,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101631609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:31,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:31,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101631610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:31,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:31,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101631611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:31,613 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:31,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101631611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:31,728 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=494 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/097675611d16440a96082d251d744742 2024-11-20T11:19:31,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/fdf0625f0ac34dddb89b71ff2747e48b is 50, key is test_row_0/B:col10/1732101569354/Put/seqid=0 2024-11-20T11:19:31,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741939_1115 (size=12301) 2024-11-20T11:19:31,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T11:19:31,811 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:31,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101631811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:31,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:31,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101631813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:31,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:31,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101631814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:31,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:31,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101631814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:31,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:31,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101631815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:32,113 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:32,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101632112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:32,118 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:32,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101632118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:32,119 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:32,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101632118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:32,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:32,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101632119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:32,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:32,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101632119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:32,144 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=494 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/fdf0625f0ac34dddb89b71ff2747e48b 2024-11-20T11:19:32,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/306d5e42ae70430dbb0b1f0d2acf0a5f is 50, key is test_row_0/C:col10/1732101569354/Put/seqid=0 2024-11-20T11:19:32,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741940_1116 (size=12301) 2024-11-20T11:19:32,164 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=494 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/306d5e42ae70430dbb0b1f0d2acf0a5f 2024-11-20T11:19:32,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/097675611d16440a96082d251d744742 as 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/097675611d16440a96082d251d744742 2024-11-20T11:19:32,175 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/097675611d16440a96082d251d744742, entries=150, sequenceid=494, filesize=12.0 K 2024-11-20T11:19:32,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/fdf0625f0ac34dddb89b71ff2747e48b as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/fdf0625f0ac34dddb89b71ff2747e48b 2024-11-20T11:19:32,181 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/fdf0625f0ac34dddb89b71ff2747e48b, entries=150, sequenceid=494, filesize=12.0 K 2024-11-20T11:19:32,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/306d5e42ae70430dbb0b1f0d2acf0a5f as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/306d5e42ae70430dbb0b1f0d2acf0a5f 2024-11-20T11:19:32,188 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/306d5e42ae70430dbb0b1f0d2acf0a5f, entries=150, sequenceid=494, filesize=12.0 K 2024-11-20T11:19:32,189 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 96d866d8db5bf8a73bb64ed0351e8f75 in 872ms, sequenceid=494, compaction requested=false 2024-11-20T11:19:32,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2538): Flush status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:32,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
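The RegionTooBusyException warnings above come from HRegion.checkResources() rejecting mutations while the region's memstore is over its blocking limit (reported here as 512.0 K): the mutation is refused rather than applied until a flush brings the memstore back under the limit. Callers normally treat this as a retriable condition, and the synchronous HBase client's own retry policy usually handles it; purely as illustration, a minimal hypothetical Java sketch of that pattern (class name, retry count, and backoff values are placeholders, not taken from this test) is:

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

// Hypothetical helper: treat RegionTooBusyException as retriable and back off
// before re-attempting the Put. Names and limits here are illustrative only.
public final class BusyRegionRetry {
  public static void putWithRetry(Table table, Put put)
      throws IOException, InterruptedException {
    int attempts = 0;
    while (true) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        if (++attempts >= 5) {
          throw e;                     // give up after a bounded number of tries
        }
        Thread.sleep(100L * attempts); // simple linear backoff between attempts
      }
    }
  }
}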
2024-11-20T11:19:32,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=33 2024-11-20T11:19:32,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=33 2024-11-20T11:19:32,192 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-11-20T11:19:32,192 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0260 sec 2024-11-20T11:19:32,194 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees in 1.0320 sec 2024-11-20T11:19:32,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T11:19:32,268 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-11-20T11:19:32,269 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:19:32,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees 2024-11-20T11:19:32,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T11:19:32,271 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=34, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:19:32,272 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=34, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:19:32,272 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:19:32,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T11:19:32,423 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:32,424 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-20T11:19:32,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
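The flush cycles in this log (pid=32/33 finished above, pid=34/35 just stored) are admin-requested table flushes: the master turns each "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" request into a FlushTableProcedure with one FlushRegionProcedure per region, and the client waits for the procedure to complete (the "Operation: FLUSH ... completed" lines). For reference only, a minimal sketch of issuing the same kind of flush through the public Java Admin API (connection setup and the class name are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustrative client-side flush request, analogous to the flush calls
// recorded in this log; not part of the test itself.
public final class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Submits the flush and waits for the master-side procedure to finish.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}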
2024-11-20T11:19:32,425 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2837): Flushing 96d866d8db5bf8a73bb64ed0351e8f75 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T11:19:32,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=A 2024-11-20T11:19:32,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:32,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=B 2024-11-20T11:19:32,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:32,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=C 2024-11-20T11:19:32,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:32,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/e45e5d877c5f427caaf5c023ca5b6211 is 50, key is test_row_0/A:col10/1732101571503/Put/seqid=0 2024-11-20T11:19:32,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741941_1117 (size=12301) 2024-11-20T11:19:32,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T11:19:32,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:32,618 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. as already flushing 2024-11-20T11:19:32,639 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:32,639 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:32,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101632633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:32,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101632633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:32,639 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:32,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101632635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:32,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:32,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101632639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:32,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:32,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101632639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:32,741 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:32,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101632740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:32,741 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:32,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:32,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101632740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:32,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101632740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:32,744 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:32,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101632743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:32,744 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:32,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101632743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:32,836 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=510 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/e45e5d877c5f427caaf5c023ca5b6211 2024-11-20T11:19:32,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/5ba9fb467f1a4f7eaf3c65ea150aaa71 is 50, key is test_row_0/B:col10/1732101571503/Put/seqid=0 2024-11-20T11:19:32,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741942_1118 (size=12301) 2024-11-20T11:19:32,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T11:19:32,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:32,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101632943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:32,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:32,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101632943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:32,945 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:32,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101632944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:32,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:32,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101632945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:32,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:32,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101632945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:33,063 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x70267494 to 127.0.0.1:62733 2024-11-20T11:19:33,063 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c915d17 to 127.0.0.1:62733 2024-11-20T11:19:33,063 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1d2a8e08 to 127.0.0.1:62733 2024-11-20T11:19:33,063 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:19:33,063 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:19:33,063 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:19:33,064 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x295cb1ac to 127.0.0.1:62733 2024-11-20T11:19:33,064 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:19:33,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:33,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48156 deadline: 1732101633246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:33,247 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:33,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48158 deadline: 1732101633246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:33,247 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:33,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48122 deadline: 1732101633247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:33,248 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:33,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48134 deadline: 1732101633248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:33,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:33,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48174 deadline: 1732101633248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:33,251 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=510 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/5ba9fb467f1a4f7eaf3c65ea150aaa71 2024-11-20T11:19:33,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/29f76ca7d4fd4e2b998a525acda3da90 is 50, key is test_row_0/C:col10/1732101571503/Put/seqid=0 2024-11-20T11:19:33,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741943_1119 (size=12301) 2024-11-20T11:19:33,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T11:19:33,664 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=510 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/29f76ca7d4fd4e2b998a525acda3da90 2024-11-20T11:19:33,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/e45e5d877c5f427caaf5c023ca5b6211 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/e45e5d877c5f427caaf5c023ca5b6211 2024-11-20T11:19:33,673 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/e45e5d877c5f427caaf5c023ca5b6211, entries=150, sequenceid=510, filesize=12.0 K 2024-11-20T11:19:33,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/5ba9fb467f1a4f7eaf3c65ea150aaa71 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/5ba9fb467f1a4f7eaf3c65ea150aaa71 2024-11-20T11:19:33,678 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/5ba9fb467f1a4f7eaf3c65ea150aaa71, entries=150, sequenceid=510, filesize=12.0 K 2024-11-20T11:19:33,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/29f76ca7d4fd4e2b998a525acda3da90 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/29f76ca7d4fd4e2b998a525acda3da90 2024-11-20T11:19:33,683 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/29f76ca7d4fd4e2b998a525acda3da90, entries=150, sequenceid=510, filesize=12.0 K 2024-11-20T11:19:33,684 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 96d866d8db5bf8a73bb64ed0351e8f75 in 1260ms, sequenceid=510, compaction requested=true 2024-11-20T11:19:33,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2538): Flush status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:33,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
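The RegionTooBusyException entries above are thrown by HRegion.checkResources(): the put is rejected because the region's memstore is over its blocking limit (512.0 K here, which suggests the test configures a very small flush size; the limit is normally derived from hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier). The HBase client retries this condition on its own, so the sketch below is only an illustration of an explicit backoff loop around Table.put(). The table, row, and column names are copied from the log; the retry count and sleep are assumptions, not part of AcidGuaranteesTestTool, and the server exception may in practice reach the caller wrapped in a retries-exhausted exception rather than directly.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // Hypothetical manual backoff: retry a few times while the region reports
          // that its memstore is over the blocking limit ("Over memstore limit=...").
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);
              break;                              // write accepted
            } catch (RegionTooBusyException e) {
              if (attempt >= 5) {
                throw e;                          // give up after a few tries
              }
              Thread.sleep(100L * attempt);       // give the in-flight flush time to finish
            }
          }
        }
      }
    }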
2024-11-20T11:19:33,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=35 2024-11-20T11:19:33,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=35 2024-11-20T11:19:33,686 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-11-20T11:19:33,686 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4130 sec 2024-11-20T11:19:33,688 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees in 1.4180 sec 2024-11-20T11:19:33,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:33,750 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 96d866d8db5bf8a73bb64ed0351e8f75 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T11:19:33,750 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5400112e to 127.0.0.1:62733 2024-11-20T11:19:33,750 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:19:33,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=A 2024-11-20T11:19:33,751 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f343a4d to 127.0.0.1:62733 2024-11-20T11:19:33,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:33,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=B 2024-11-20T11:19:33,751 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:19:33,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:33,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=C 2024-11-20T11:19:33,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:33,753 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x22cb07dd to 127.0.0.1:62733 2024-11-20T11:19:33,753 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:19:33,754 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x478bae6b to 127.0.0.1:62733 2024-11-20T11:19:33,754 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:19:33,755 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x38766d64 to 127.0.0.1:62733 2024-11-20T11:19:33,755 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:19:33,758 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/90790e9a1f704bb7a3652df571dba63c is 50, key is test_row_0/A:col10/1732101573750/Put/seqid=0 2024-11-20T11:19:33,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741944_1120 (size=12301) 2024-11-20T11:19:34,163 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=532 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/90790e9a1f704bb7a3652df571dba63c 2024-11-20T11:19:34,171 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/b9eca8a340344ce7ae03647b748cbb68 is 50, key is test_row_0/B:col10/1732101573750/Put/seqid=0 2024-11-20T11:19:34,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741945_1121 (size=12301) 2024-11-20T11:19:34,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T11:19:34,380 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 34 completed 2024-11-20T11:19:34,380 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-20T11:19:34,380 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 84 2024-11-20T11:19:34,380 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 87 2024-11-20T11:19:34,380 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 83 2024-11-20T11:19:34,380 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 82 2024-11-20T11:19:34,380 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 90 2024-11-20T11:19:34,380 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T11:19:34,380 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6594 2024-11-20T11:19:34,380 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6526 2024-11-20T11:19:34,380 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T11:19:34,380 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2887 2024-11-20T11:19:34,380 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8659 rows 2024-11-20T11:19:34,380 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2855 2024-11-20T11:19:34,380 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8564 rows 2024-11-20T11:19:34,381 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T11:19:34,381 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f6e36fe to 127.0.0.1:62733 2024-11-20T11:19:34,381 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:19:34,387 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T11:19:34,391 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T11:19:34,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=36, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T11:19:34,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-20T11:19:34,398 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732101574398"}]},"ts":"1732101574398"} 2024-11-20T11:19:34,399 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T11:19:34,402 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T11:19:34,404 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T11:19:34,408 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=96d866d8db5bf8a73bb64ed0351e8f75, UNASSIGN}] 2024-11-20T11:19:34,408 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=96d866d8db5bf8a73bb64ed0351e8f75, UNASSIGN 2024-11-20T11:19:34,409 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=96d866d8db5bf8a73bb64ed0351e8f75, regionState=CLOSING, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:34,411 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T11:19:34,411 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; CloseRegionProcedure 96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666}] 2024-11-20T11:19:34,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-20T11:19:34,566 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:34,568 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(124): Close 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:34,568 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T11:19:34,569 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1681): Closing 96d866d8db5bf8a73bb64ed0351e8f75, disabling compactions & flushes 2024-11-20T11:19:34,569 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to 
complete for region TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:34,576 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=532 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/b9eca8a340344ce7ae03647b748cbb68 2024-11-20T11:19:34,584 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/1e8f681214a449a585f7c6e3fb393eb2 is 50, key is test_row_0/C:col10/1732101573750/Put/seqid=0 2024-11-20T11:19:34,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741946_1122 (size=12301) 2024-11-20T11:19:34,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-20T11:19:34,988 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=532 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/1e8f681214a449a585f7c6e3fb393eb2 2024-11-20T11:19:34,993 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/90790e9a1f704bb7a3652df571dba63c as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/90790e9a1f704bb7a3652df571dba63c 2024-11-20T11:19:34,997 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/90790e9a1f704bb7a3652df571dba63c, entries=150, sequenceid=532, filesize=12.0 K 2024-11-20T11:19:34,998 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/b9eca8a340344ce7ae03647b748cbb68 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/b9eca8a340344ce7ae03647b748cbb68 2024-11-20T11:19:35,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-20T11:19:35,002 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/b9eca8a340344ce7ae03647b748cbb68, entries=150, sequenceid=532, filesize=12.0 K 2024-11-20T11:19:35,003 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/1e8f681214a449a585f7c6e3fb393eb2 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/1e8f681214a449a585f7c6e3fb393eb2 2024-11-20T11:19:35,007 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/1e8f681214a449a585f7c6e3fb393eb2, entries=150, sequenceid=532, filesize=12.0 K 2024-11-20T11:19:35,008 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=20.13 KB/20610 for 96d866d8db5bf8a73bb64ed0351e8f75 in 1258ms, sequenceid=532, compaction requested=true 2024-11-20T11:19:35,008 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:35,008 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:35,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:19:35,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:35,008 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:35,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:19:35,008 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. because compaction request was cancelled 2024-11-20T11:19:35,008 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. after waiting 0 ms 2024-11-20T11:19:35,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:35,008 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 
because compaction request was cancelled 2024-11-20T11:19:35,008 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:A 2024-11-20T11:19:35,008 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:35,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 96d866d8db5bf8a73bb64ed0351e8f75:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:19:35,008 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:B 2024-11-20T11:19:35,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:35,008 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. because compaction request was cancelled 2024-11-20T11:19:35,008 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 96d866d8db5bf8a73bb64ed0351e8f75:C 2024-11-20T11:19:35,008 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(2837): Flushing 96d866d8db5bf8a73bb64ed0351e8f75 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-20T11:19:35,009 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=A 2024-11-20T11:19:35,009 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:35,009 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=B 2024-11-20T11:19:35,009 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:35,009 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 96d866d8db5bf8a73bb64ed0351e8f75, store=C 2024-11-20T11:19:35,009 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:35,013 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/b1e1c533b8f744d98aca9eabd58e0e05 is 50, key is test_row_1/A:col10/1732101573754/Put/seqid=0 2024-11-20T11:19:35,017 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741947_1123 (size=9857) 2024-11-20T11:19:35,055 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T11:19:35,418 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=538 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/b1e1c533b8f744d98aca9eabd58e0e05 2024-11-20T11:19:35,426 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/e961611af35549d3b4a3f2831c2cacc0 is 50, key is test_row_1/B:col10/1732101573754/Put/seqid=0 2024-11-20T11:19:35,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741948_1124 (size=9857) 2024-11-20T11:19:35,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-20T11:19:35,830 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=538 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/e961611af35549d3b4a3f2831c2cacc0 2024-11-20T11:19:35,838 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/fba06dab766a4db599288590fdf26e6f is 50, key is test_row_1/C:col10/1732101573754/Put/seqid=0 2024-11-20T11:19:35,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741949_1125 (size=9857) 2024-11-20T11:19:36,242 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=538 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/fba06dab766a4db599288590fdf26e6f 2024-11-20T11:19:36,248 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/A/b1e1c533b8f744d98aca9eabd58e0e05 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/b1e1c533b8f744d98aca9eabd58e0e05 2024-11-20T11:19:36,253 INFO 
[RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/b1e1c533b8f744d98aca9eabd58e0e05, entries=100, sequenceid=538, filesize=9.6 K 2024-11-20T11:19:36,254 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/B/e961611af35549d3b4a3f2831c2cacc0 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/e961611af35549d3b4a3f2831c2cacc0 2024-11-20T11:19:36,259 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/e961611af35549d3b4a3f2831c2cacc0, entries=100, sequenceid=538, filesize=9.6 K 2024-11-20T11:19:36,260 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/.tmp/C/fba06dab766a4db599288590fdf26e6f as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/fba06dab766a4db599288590fdf26e6f 2024-11-20T11:19:36,265 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/fba06dab766a4db599288590fdf26e6f, entries=100, sequenceid=538, filesize=9.6 K 2024-11-20T11:19:36,266 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 96d866d8db5bf8a73bb64ed0351e8f75 in 1258ms, sequenceid=538, compaction requested=true 2024-11-20T11:19:36,266 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/96cf97f634514f289c6d5ece56e9a3b2, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/ed3e27bbad774df1a8562284c3e3fea0, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/6b592734f8244edf9e6e0bd3719b3169, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/bc47c49dc08d4d908b8715a5f7c8f741, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/b1307322d9d341aba59cc514ceb0fca0, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/27bb6028c1ce4e59a311daf174ce6012, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/f40ccbc811a848e3ad4292df02ae3ecb, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/2664d85c802443aeb9f839f1551cb0b7, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/c04b4850729e450ca5ae7121d3b1c732, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/6b15e454cffe4bcab9c5a3d2e4a81192, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/8fc35313950a4d17bb4753076099bf8b, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/1e2fd32782e746e0826a682ff2596fdf, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/5f396fe4595246599e26991c151e2b8f, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/696f8e03bcb6471f89f2e4f2c5ff18ea, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/b42bcc3d62f84c94b852287350d0f07f, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/2a1d8d0e26ec44179496cdcedd24773d, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/4d0ff98e8c8c46c18d7dd899c12d8aa8, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/7db94e993c234214bd6f4168eb887018, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/29ffe433ee814188b39da1f7ba079908, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/614791872a244a5781f47c585f847768, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/67b2bee4864a42b1919fe729aedd78ee, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/50fb24cc835d469ea284f5bfbe700f9b, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/b2dd27461dc843549125c24ed07abfb4, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/0df1b55b85304e98adde93a65c3bc0ae, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/50527ba70541483d9bebeb50d7902814, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/4e5bfbf48c4a4eab80e5b3877f03d426, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/0fa121b129774edaad3b79c5fb111187, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/4d3bb0b327004175aca3bc0c768ac6fc, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/91ba36568bf34ea0ad84832595f2daf9, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/c1cf41e01f844a0688fb33d8c81e06e8, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/fcfdac169f8f414486ffb9d825b6df11, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/3dfd18ae1e9746cdb7d8319c2d01ecc2] to archive 2024-11-20T11:19:36,270 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
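The StoreCloser entry above lists every compacted HFile left in the region's A store, and the HFileArchiver lines that follow move each one under the parallel archive/data/default/TestAcidGuarantees/... path rather than deleting it. A minimal sketch of inspecting that archive directory with the Hadoop FileSystem API follows; the NameNode address, test-data directory, and region hash are copied from this log and would differ on any other cluster.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListArchivedStoreFiles {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Values below are taken from the log of this test run.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:43109"), conf);
        Path archivedFamilyA = new Path(
            "/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/"
            + "TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A");
        for (FileStatus status : fs.listStatus(archivedFamilyA)) {
          // Each entry is a store file that the closing region moved out of its A store.
          System.out.printf("%s\t%d bytes%n", status.getPath().getName(), status.getLen());
        }
      }
    }

The same archive layout repeats for the B and C stores archived further down in the log.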
2024-11-20T11:19:36,275 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/96cf97f634514f289c6d5ece56e9a3b2 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/96cf97f634514f289c6d5ece56e9a3b2 2024-11-20T11:19:36,277 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/ed3e27bbad774df1a8562284c3e3fea0 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/ed3e27bbad774df1a8562284c3e3fea0 2024-11-20T11:19:36,278 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/6b592734f8244edf9e6e0bd3719b3169 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/6b592734f8244edf9e6e0bd3719b3169 2024-11-20T11:19:36,279 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/bc47c49dc08d4d908b8715a5f7c8f741 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/bc47c49dc08d4d908b8715a5f7c8f741 2024-11-20T11:19:36,281 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/b1307322d9d341aba59cc514ceb0fca0 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/b1307322d9d341aba59cc514ceb0fca0 2024-11-20T11:19:36,282 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/27bb6028c1ce4e59a311daf174ce6012 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/27bb6028c1ce4e59a311daf174ce6012 2024-11-20T11:19:36,284 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/f40ccbc811a848e3ad4292df02ae3ecb to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/f40ccbc811a848e3ad4292df02ae3ecb 2024-11-20T11:19:36,285 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/2664d85c802443aeb9f839f1551cb0b7 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/2664d85c802443aeb9f839f1551cb0b7 2024-11-20T11:19:36,286 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/c04b4850729e450ca5ae7121d3b1c732 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/c04b4850729e450ca5ae7121d3b1c732 2024-11-20T11:19:36,288 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/6b15e454cffe4bcab9c5a3d2e4a81192 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/6b15e454cffe4bcab9c5a3d2e4a81192 2024-11-20T11:19:36,289 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/8fc35313950a4d17bb4753076099bf8b to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/8fc35313950a4d17bb4753076099bf8b 2024-11-20T11:19:36,290 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/1e2fd32782e746e0826a682ff2596fdf to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/1e2fd32782e746e0826a682ff2596fdf 2024-11-20T11:19:36,291 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/5f396fe4595246599e26991c151e2b8f to 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/5f396fe4595246599e26991c151e2b8f 2024-11-20T11:19:36,293 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/696f8e03bcb6471f89f2e4f2c5ff18ea to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/696f8e03bcb6471f89f2e4f2c5ff18ea 2024-11-20T11:19:36,294 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/b42bcc3d62f84c94b852287350d0f07f to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/b42bcc3d62f84c94b852287350d0f07f 2024-11-20T11:19:36,296 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/2a1d8d0e26ec44179496cdcedd24773d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/2a1d8d0e26ec44179496cdcedd24773d 2024-11-20T11:19:36,297 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/4d0ff98e8c8c46c18d7dd899c12d8aa8 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/4d0ff98e8c8c46c18d7dd899c12d8aa8 2024-11-20T11:19:36,298 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/7db94e993c234214bd6f4168eb887018 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/7db94e993c234214bd6f4168eb887018 2024-11-20T11:19:36,299 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/29ffe433ee814188b39da1f7ba079908 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/29ffe433ee814188b39da1f7ba079908 2024-11-20T11:19:36,300 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/614791872a244a5781f47c585f847768 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/614791872a244a5781f47c585f847768 2024-11-20T11:19:36,301 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/67b2bee4864a42b1919fe729aedd78ee to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/67b2bee4864a42b1919fe729aedd78ee 2024-11-20T11:19:36,302 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/50fb24cc835d469ea284f5bfbe700f9b to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/50fb24cc835d469ea284f5bfbe700f9b 2024-11-20T11:19:36,303 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/b2dd27461dc843549125c24ed07abfb4 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/b2dd27461dc843549125c24ed07abfb4 2024-11-20T11:19:36,305 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/0df1b55b85304e98adde93a65c3bc0ae to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/0df1b55b85304e98adde93a65c3bc0ae 2024-11-20T11:19:36,306 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/50527ba70541483d9bebeb50d7902814 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/50527ba70541483d9bebeb50d7902814 2024-11-20T11:19:36,307 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/4e5bfbf48c4a4eab80e5b3877f03d426 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/4e5bfbf48c4a4eab80e5b3877f03d426 2024-11-20T11:19:36,308 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/0fa121b129774edaad3b79c5fb111187 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/0fa121b129774edaad3b79c5fb111187 2024-11-20T11:19:36,309 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/4d3bb0b327004175aca3bc0c768ac6fc to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/4d3bb0b327004175aca3bc0c768ac6fc 2024-11-20T11:19:36,310 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/91ba36568bf34ea0ad84832595f2daf9 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/91ba36568bf34ea0ad84832595f2daf9 2024-11-20T11:19:36,311 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/c1cf41e01f844a0688fb33d8c81e06e8 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/c1cf41e01f844a0688fb33d8c81e06e8 2024-11-20T11:19:36,312 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/fcfdac169f8f414486ffb9d825b6df11 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/fcfdac169f8f414486ffb9d825b6df11 2024-11-20T11:19:36,313 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/3dfd18ae1e9746cdb7d8319c2d01ecc2 to 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/3dfd18ae1e9746cdb7d8319c2d01ecc2 2024-11-20T11:19:36,327 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/ac712f963da64ffabae219239abec686, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/324b2b752f6446169f2ee961efa00e83, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/331d39a94999402e9c22691f81ad90c1, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/995be427326a4c9cb8bfa9e50f79a54d, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/f48086089cfc43fcb7e6bb98e7c76690, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/5fe9af9ce8c0442a8f34977ba23d82ed, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/16ff31a0809841d0ab2e69dfcecacc95, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/1c61dda16a7b45bfa021df97c532d811, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/43565c5c242e4e81ba2806c1bd21cf37, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/cdbc1388f4804682b4738ad7ba260f0e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/f64eec5ae9c9465ab9a88cce385ae183, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/15772da65d694dd0b2f982883bdaef5c, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/b6fc4bd5857a4effb7c8bdfb5e5a6a75, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/74f1cb6d238a4c6fae0ff147dadfcd6c, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/074fd5365c794660b55b14ced76d002b, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/4308e971c0fe46b6b68c158125e0256c, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/1f94ed39ac4e48faab94cd454a8b609c, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/baa86b0e39ff479abf77e3adb8d9536e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/bf7773ba5c42487fb048d0ef74a06ec9, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/62bb63ba65ff4f1ba22e1370d2334cb7, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/213eff694aa943e1aee2d8c148bab137, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/1b0bb4abea144f1ab6199df1e8c5824e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/f2d8e51821f647d0a1d66d9ac292617f, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/907a04771e0f4ae1baf53635b6a79454, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/9fd93d9511a3473dadf978c620263dcb, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/e3dd528907e4474aab72dff1ea687fdd, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/2988d3a970e4446a90bba3f56dc15257, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/e669ee7e73724073a18bca5826da7a32, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/70f5e46db9fa45e495f9d39ba1ebcca0, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/44081c28a26544c19a457c587c9b24ce, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/319bb4a76a604990935ceff0b3fc4ad2, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/c1c6eb21f24c427ea97b99cf562c7417] to archive 2024-11-20T11:19:36,328 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
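The DEBUG entries above and below show backup.HFileArchiver relocating each compacted store file from the region's data directory into the parallel archive directory, keeping the namespace/table/region/family layout intact. A minimal, hypothetical sketch of that path mapping and move, written against the Hadoop FileSystem API (this is not the actual HFileArchiver implementation; the root directory, sample file, and error handling are assumptions patterned on the paths in this log):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ArchiveMoveSketch {
      // Map  <root>/data/<ns>/<table>/<region>/<family>/<hfile>
      // to   <root>/archive/data/<ns>/<table>/<region>/<family>/<hfile>,
      // mirroring the source/destination pairs in the surrounding DEBUG lines.
      static Path toArchivePath(Path rootDir, Path storeFile) {
        Path family = storeFile.getParent();
        Path region = family.getParent();
        Path table  = region.getParent();
        Path ns     = table.getParent();
        return new Path(rootDir, "archive/data/" + ns.getName() + "/" + table.getName()
            + "/" + region.getName() + "/" + family.getName() + "/" + storeFile.getName());
      }

      // Move one compacted store file into the archive, creating parent dirs first.
      static void archiveStoreFile(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
        Path dst = toArchivePath(rootDir, storeFile);
        fs.mkdirs(dst.getParent());
        if (!fs.rename(storeFile, dst)) {
          throw new IOException("Failed to archive " + storeFile + " to " + dst);
        }
      }

      public static void main(String[] args) throws IOException {
        // Paths copied from this log for illustration; the test derives them from its mini-cluster config.
        Path rootDir = new Path("hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830");
        Path storeFile = new Path(rootDir,
            "data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/ac712f963da64ffabae219239abec686");
        FileSystem fs = rootDir.getFileSystem(new Configuration());
        archiveStoreFile(fs, rootDir, storeFile);
      }
    }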
2024-11-20T11:19:36,330 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/ac712f963da64ffabae219239abec686 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/ac712f963da64ffabae219239abec686 2024-11-20T11:19:36,331 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/324b2b752f6446169f2ee961efa00e83 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/324b2b752f6446169f2ee961efa00e83 2024-11-20T11:19:36,332 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/331d39a94999402e9c22691f81ad90c1 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/331d39a94999402e9c22691f81ad90c1 2024-11-20T11:19:36,334 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/995be427326a4c9cb8bfa9e50f79a54d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/995be427326a4c9cb8bfa9e50f79a54d 2024-11-20T11:19:36,335 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/f48086089cfc43fcb7e6bb98e7c76690 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/f48086089cfc43fcb7e6bb98e7c76690 2024-11-20T11:19:36,336 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/5fe9af9ce8c0442a8f34977ba23d82ed to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/5fe9af9ce8c0442a8f34977ba23d82ed 2024-11-20T11:19:36,337 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/16ff31a0809841d0ab2e69dfcecacc95 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/16ff31a0809841d0ab2e69dfcecacc95 2024-11-20T11:19:36,338 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/1c61dda16a7b45bfa021df97c532d811 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/1c61dda16a7b45bfa021df97c532d811 2024-11-20T11:19:36,339 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/43565c5c242e4e81ba2806c1bd21cf37 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/43565c5c242e4e81ba2806c1bd21cf37 2024-11-20T11:19:36,340 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/cdbc1388f4804682b4738ad7ba260f0e to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/cdbc1388f4804682b4738ad7ba260f0e 2024-11-20T11:19:36,342 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/f64eec5ae9c9465ab9a88cce385ae183 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/f64eec5ae9c9465ab9a88cce385ae183 2024-11-20T11:19:36,343 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/15772da65d694dd0b2f982883bdaef5c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/15772da65d694dd0b2f982883bdaef5c 2024-11-20T11:19:36,344 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/b6fc4bd5857a4effb7c8bdfb5e5a6a75 to 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/b6fc4bd5857a4effb7c8bdfb5e5a6a75 2024-11-20T11:19:36,345 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/74f1cb6d238a4c6fae0ff147dadfcd6c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/74f1cb6d238a4c6fae0ff147dadfcd6c 2024-11-20T11:19:36,346 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/074fd5365c794660b55b14ced76d002b to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/074fd5365c794660b55b14ced76d002b 2024-11-20T11:19:36,347 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/4308e971c0fe46b6b68c158125e0256c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/4308e971c0fe46b6b68c158125e0256c 2024-11-20T11:19:36,349 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/1f94ed39ac4e48faab94cd454a8b609c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/1f94ed39ac4e48faab94cd454a8b609c 2024-11-20T11:19:36,350 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/baa86b0e39ff479abf77e3adb8d9536e to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/baa86b0e39ff479abf77e3adb8d9536e 2024-11-20T11:19:36,351 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/bf7773ba5c42487fb048d0ef74a06ec9 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/bf7773ba5c42487fb048d0ef74a06ec9 2024-11-20T11:19:36,352 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/62bb63ba65ff4f1ba22e1370d2334cb7 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/62bb63ba65ff4f1ba22e1370d2334cb7 2024-11-20T11:19:36,353 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/213eff694aa943e1aee2d8c148bab137 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/213eff694aa943e1aee2d8c148bab137 2024-11-20T11:19:36,355 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/1b0bb4abea144f1ab6199df1e8c5824e to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/1b0bb4abea144f1ab6199df1e8c5824e 2024-11-20T11:19:36,356 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/f2d8e51821f647d0a1d66d9ac292617f to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/f2d8e51821f647d0a1d66d9ac292617f 2024-11-20T11:19:36,357 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/907a04771e0f4ae1baf53635b6a79454 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/907a04771e0f4ae1baf53635b6a79454 2024-11-20T11:19:36,358 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/9fd93d9511a3473dadf978c620263dcb to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/9fd93d9511a3473dadf978c620263dcb 2024-11-20T11:19:36,359 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/e3dd528907e4474aab72dff1ea687fdd to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/e3dd528907e4474aab72dff1ea687fdd 2024-11-20T11:19:36,360 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/2988d3a970e4446a90bba3f56dc15257 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/2988d3a970e4446a90bba3f56dc15257 2024-11-20T11:19:36,362 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/e669ee7e73724073a18bca5826da7a32 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/e669ee7e73724073a18bca5826da7a32 2024-11-20T11:19:36,363 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/70f5e46db9fa45e495f9d39ba1ebcca0 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/70f5e46db9fa45e495f9d39ba1ebcca0 2024-11-20T11:19:36,364 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/44081c28a26544c19a457c587c9b24ce to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/44081c28a26544c19a457c587c9b24ce 2024-11-20T11:19:36,365 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/319bb4a76a604990935ceff0b3fc4ad2 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/319bb4a76a604990935ceff0b3fc4ad2 2024-11-20T11:19:36,366 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/c1c6eb21f24c427ea97b99cf562c7417 to 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/c1c6eb21f24c427ea97b99cf562c7417 2024-11-20T11:19:36,368 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/0c0a86b867914278ac862b162db6b491, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/9f8e86865eb64e7aa912d9575caceaac, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/5a0dfb0c00104d8dbd81d0fe64a7f6a9, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/b483161ec6cd43e09238aea4e83d84fa, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/5ac974337d134411a8df9765aeca66ec, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/c52084e8243a430ba9e32945a0592a41, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/2f5523e9ca104a08a4c9ec683408e875, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/a106e912abf94927a9140e96df23dbc0, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/7f6f26df187b4ab489ed3412de7be0a1, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/dc2a523ea4dd418694906295b887f54d, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/9ca317d7e6954a049a987a70500014a7, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/86cbf769fdcd483f9e942e532a5b91e2, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/d96112497c1d4540946c4b205050d275, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/ce9a99256c5245e39daab6e2394f3215, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/a8844b5c620d487dac249e271f0083d8, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/86fc5325ade043e98c78b9c588ea75fc, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/5e19a9ee8e6b4b5894e220a54348e32c, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/6e7ce4c64877462ebebe6260ee072b56, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/1d11a2a86b474be8ada819459efbb6a0, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/96ef7e49a3db45e6b036c2d68b2e6659, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/9f6bf422607c4ec88f6cdbed2b376c01, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/4626c1292a1b410484a7b1d74754688e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/1a9e8c95ee9544f7a70324f3a2371103, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/ad56fee419144ffdb0da9584ae227b1e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/4a8b3a47840d40639a10d416758956ae, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/3a9f22f770eb46c3973dc3fda0eab7b7, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/0d41bf0fc87546c39e7e5dd8a0fa866c, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/a0eab9c02289416ba882354aa9ffb8d1, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/ffd19f0c32124e7699754a81dff57619, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/f9c7ffd28e7142f58c7bffe11b3af935, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/f6f6583e1f17412590be60dd224e77cb, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/c4c1f0e6f0804c74a051560083278b2f] to archive 2024-11-20T11:19:36,369 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
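Once the store files for families A, B, and C have been archived, the remainder of this log shows the region close completing and the test disabling and then deleting TestAcidGuarantees (DisableTableProcedure pid=36, then DeleteTableProcedure pid=40, both driven through HBaseAdmin). A minimal client-side sketch of that disable-then-delete sequence, assuming a plain Connection rather than the test's mini-cluster utility:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Corresponds to the DISABLE operation (procId 36): regions are unassigned
          // and the table state in hbase:meta is set to DISABLED.
          if (admin.isTableEnabled(table)) {
            admin.disableTable(table);
          }
          // Corresponds to the DELETE operation (procId 40): region directories are
          // archived and the table is removed from hbase:meta.
          admin.deleteTable(table);
        }
      }
    }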
2024-11-20T11:19:36,370 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/0c0a86b867914278ac862b162db6b491 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/0c0a86b867914278ac862b162db6b491 2024-11-20T11:19:36,372 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/9f8e86865eb64e7aa912d9575caceaac to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/9f8e86865eb64e7aa912d9575caceaac 2024-11-20T11:19:36,373 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/5a0dfb0c00104d8dbd81d0fe64a7f6a9 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/5a0dfb0c00104d8dbd81d0fe64a7f6a9 2024-11-20T11:19:36,374 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/b483161ec6cd43e09238aea4e83d84fa to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/b483161ec6cd43e09238aea4e83d84fa 2024-11-20T11:19:36,375 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/5ac974337d134411a8df9765aeca66ec to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/5ac974337d134411a8df9765aeca66ec 2024-11-20T11:19:36,376 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/c52084e8243a430ba9e32945a0592a41 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/c52084e8243a430ba9e32945a0592a41 2024-11-20T11:19:36,377 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/2f5523e9ca104a08a4c9ec683408e875 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/2f5523e9ca104a08a4c9ec683408e875 2024-11-20T11:19:36,378 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/a106e912abf94927a9140e96df23dbc0 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/a106e912abf94927a9140e96df23dbc0 2024-11-20T11:19:36,379 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/7f6f26df187b4ab489ed3412de7be0a1 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/7f6f26df187b4ab489ed3412de7be0a1 2024-11-20T11:19:36,380 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/dc2a523ea4dd418694906295b887f54d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/dc2a523ea4dd418694906295b887f54d 2024-11-20T11:19:36,381 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/9ca317d7e6954a049a987a70500014a7 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/9ca317d7e6954a049a987a70500014a7 2024-11-20T11:19:36,382 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/86cbf769fdcd483f9e942e532a5b91e2 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/86cbf769fdcd483f9e942e532a5b91e2 2024-11-20T11:19:36,383 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/d96112497c1d4540946c4b205050d275 to 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/d96112497c1d4540946c4b205050d275 2024-11-20T11:19:36,385 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/ce9a99256c5245e39daab6e2394f3215 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/ce9a99256c5245e39daab6e2394f3215 2024-11-20T11:19:36,386 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/a8844b5c620d487dac249e271f0083d8 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/a8844b5c620d487dac249e271f0083d8 2024-11-20T11:19:36,387 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/86fc5325ade043e98c78b9c588ea75fc to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/86fc5325ade043e98c78b9c588ea75fc 2024-11-20T11:19:36,388 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/5e19a9ee8e6b4b5894e220a54348e32c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/5e19a9ee8e6b4b5894e220a54348e32c 2024-11-20T11:19:36,389 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/6e7ce4c64877462ebebe6260ee072b56 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/6e7ce4c64877462ebebe6260ee072b56 2024-11-20T11:19:36,390 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/1d11a2a86b474be8ada819459efbb6a0 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/1d11a2a86b474be8ada819459efbb6a0 2024-11-20T11:19:36,392 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/96ef7e49a3db45e6b036c2d68b2e6659 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/96ef7e49a3db45e6b036c2d68b2e6659 2024-11-20T11:19:36,393 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/9f6bf422607c4ec88f6cdbed2b376c01 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/9f6bf422607c4ec88f6cdbed2b376c01 2024-11-20T11:19:36,395 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/4626c1292a1b410484a7b1d74754688e to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/4626c1292a1b410484a7b1d74754688e 2024-11-20T11:19:36,396 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/1a9e8c95ee9544f7a70324f3a2371103 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/1a9e8c95ee9544f7a70324f3a2371103 2024-11-20T11:19:36,397 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/ad56fee419144ffdb0da9584ae227b1e to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/ad56fee419144ffdb0da9584ae227b1e 2024-11-20T11:19:36,398 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/4a8b3a47840d40639a10d416758956ae to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/4a8b3a47840d40639a10d416758956ae 2024-11-20T11:19:36,400 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/3a9f22f770eb46c3973dc3fda0eab7b7 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/3a9f22f770eb46c3973dc3fda0eab7b7 2024-11-20T11:19:36,401 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/0d41bf0fc87546c39e7e5dd8a0fa866c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/0d41bf0fc87546c39e7e5dd8a0fa866c 2024-11-20T11:19:36,403 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/a0eab9c02289416ba882354aa9ffb8d1 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/a0eab9c02289416ba882354aa9ffb8d1 2024-11-20T11:19:36,404 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/ffd19f0c32124e7699754a81dff57619 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/ffd19f0c32124e7699754a81dff57619 2024-11-20T11:19:36,405 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/f9c7ffd28e7142f58c7bffe11b3af935 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/f9c7ffd28e7142f58c7bffe11b3af935 2024-11-20T11:19:36,406 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/f6f6583e1f17412590be60dd224e77cb to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/f6f6583e1f17412590be60dd224e77cb 2024-11-20T11:19:36,408 DEBUG [StoreCloser-TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/c4c1f0e6f0804c74a051560083278b2f to 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/c4c1f0e6f0804c74a051560083278b2f 2024-11-20T11:19:36,413 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/recovered.edits/541.seqid, newMaxSeqId=541, maxSeqId=1 2024-11-20T11:19:36,416 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75. 2024-11-20T11:19:36,416 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1635): Region close journal for 96d866d8db5bf8a73bb64ed0351e8f75: 2024-11-20T11:19:36,416 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T11:19:36,418 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(170): Closed 96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:36,418 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=96d866d8db5bf8a73bb64ed0351e8f75, regionState=CLOSED 2024-11-20T11:19:36,421 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-11-20T11:19:36,421 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; CloseRegionProcedure 96d866d8db5bf8a73bb64ed0351e8f75, server=ee8338ed7cc0,35185,1732101546666 in 2.0080 sec 2024-11-20T11:19:36,422 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=38, resume processing ppid=37 2024-11-20T11:19:36,422 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, ppid=37, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=96d866d8db5bf8a73bb64ed0351e8f75, UNASSIGN in 2.0140 sec 2024-11-20T11:19:36,424 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-11-20T11:19:36,424 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.0200 sec 2024-11-20T11:19:36,426 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732101576425"}]},"ts":"1732101576425"} 2024-11-20T11:19:36,427 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T11:19:36,429 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T11:19:36,431 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.0380 sec 2024-11-20T11:19:36,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-20T11:19:36,502 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: 
DISABLE, Table Name: default:TestAcidGuarantees, procId: 36 completed 2024-11-20T11:19:36,506 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T11:19:36,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=40, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:19:36,511 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=40, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:19:36,512 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=40, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:19:36,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-11-20T11:19:36,515 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:36,519 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A, FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B, FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C, FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/recovered.edits] 2024-11-20T11:19:36,522 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/097675611d16440a96082d251d744742 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/097675611d16440a96082d251d744742 2024-11-20T11:19:36,524 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/3ab0c35de0ba4693bbc76dcc71e76ac6 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/3ab0c35de0ba4693bbc76dcc71e76ac6 2024-11-20T11:19:36,526 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/90790e9a1f704bb7a3652df571dba63c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/90790e9a1f704bb7a3652df571dba63c 2024-11-20T11:19:36,528 
DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/b1e1c533b8f744d98aca9eabd58e0e05 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/b1e1c533b8f744d98aca9eabd58e0e05 2024-11-20T11:19:36,529 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/e45e5d877c5f427caaf5c023ca5b6211 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/A/e45e5d877c5f427caaf5c023ca5b6211 2024-11-20T11:19:36,532 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/5ba9fb467f1a4f7eaf3c65ea150aaa71 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/5ba9fb467f1a4f7eaf3c65ea150aaa71 2024-11-20T11:19:36,534 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/b9eca8a340344ce7ae03647b748cbb68 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/b9eca8a340344ce7ae03647b748cbb68 2024-11-20T11:19:36,535 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/e961611af35549d3b4a3f2831c2cacc0 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/e961611af35549d3b4a3f2831c2cacc0 2024-11-20T11:19:36,536 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/ec2756f4e1c64aada9237c184819797d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/ec2756f4e1c64aada9237c184819797d 2024-11-20T11:19:36,538 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/fdf0625f0ac34dddb89b71ff2747e48b to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/B/fdf0625f0ac34dddb89b71ff2747e48b 2024-11-20T11:19:36,541 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/1e8f681214a449a585f7c6e3fb393eb2 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/1e8f681214a449a585f7c6e3fb393eb2 2024-11-20T11:19:36,542 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/29f76ca7d4fd4e2b998a525acda3da90 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/29f76ca7d4fd4e2b998a525acda3da90 2024-11-20T11:19:36,544 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/306d5e42ae70430dbb0b1f0d2acf0a5f to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/306d5e42ae70430dbb0b1f0d2acf0a5f 2024-11-20T11:19:36,545 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/600f893495e64fed89b882c223b7ed02 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/600f893495e64fed89b882c223b7ed02 2024-11-20T11:19:36,546 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/fba06dab766a4db599288590fdf26e6f to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/C/fba06dab766a4db599288590fdf26e6f 2024-11-20T11:19:36,549 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/recovered.edits/541.seqid to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75/recovered.edits/541.seqid 2024-11-20T11:19:36,550 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/96d866d8db5bf8a73bb64ed0351e8f75 2024-11-20T11:19:36,550 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T11:19:36,555 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=40, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:19:36,559 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 
0ms 2024-11-20T11:19:36,562 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T11:19:36,597 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T11:19:36,598 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=40, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:19:36,598 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T11:19:36,599 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732101576599"}]},"ts":"9223372036854775807"} 2024-11-20T11:19:36,602 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T11:19:36,602 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 96d866d8db5bf8a73bb64ed0351e8f75, NAME => 'TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T11:19:36,602 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T11:19:36,602 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732101576602"}]},"ts":"9223372036854775807"} 2024-11-20T11:19:36,605 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T11:19:36,608 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=40, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:19:36,609 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 102 msec 2024-11-20T11:19:36,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-11-20T11:19:36,613 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 40 completed 2024-11-20T11:19:36,626 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=240 (was 219) Potentially hanging thread: hconnection-0x6a7920e2-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6a7920e2-shared-pool-5 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1811910228_22 at /127.0.0.1:48038 [Waiting for operation #306] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_669624712_22 at /127.0.0.1:40026 [Waiting for operation #331] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;ee8338ed7cc0:35185-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6a7920e2-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_669624712_22 at /127.0.0.1:40816 [Waiting for operation #64] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6a7920e2-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=459 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=284 (was 174) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6029 (was 6574) 2024-11-20T11:19:36,635 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=240, OpenFileDescriptor=459, MaxFileDescriptor=1048576, SystemLoadAverage=284, ProcessCount=11, AvailableMemoryMB=6028 2024-11-20T11:19:36,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T11:19:36,638 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T11:19:36,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T11:19:36,640 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T11:19:36,640 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:36,640 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for 
creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 41 2024-11-20T11:19:36,641 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T11:19:36,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-20T11:19:36,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741950_1126 (size=960) 2024-11-20T11:19:36,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-20T11:19:36,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-20T11:19:37,049 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830 2024-11-20T11:19:37,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741951_1127 (size=53) 2024-11-20T11:19:37,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-20T11:19:37,456 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T11:19:37,456 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 8dd1f041f5ff83d363163edeec4cd720, disabling compactions & flushes 2024-11-20T11:19:37,456 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
2024-11-20T11:19:37,456 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:37,456 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. after waiting 0 ms 2024-11-20T11:19:37,456 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:37,456 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:37,456 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:37,457 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T11:19:37,458 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732101577457"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732101577457"}]},"ts":"1732101577457"} 2024-11-20T11:19:37,459 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T11:19:37,460 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T11:19:37,460 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732101577460"}]},"ts":"1732101577460"} 2024-11-20T11:19:37,461 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T11:19:37,466 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8dd1f041f5ff83d363163edeec4cd720, ASSIGN}] 2024-11-20T11:19:37,467 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8dd1f041f5ff83d363163edeec4cd720, ASSIGN 2024-11-20T11:19:37,468 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=8dd1f041f5ff83d363163edeec4cd720, ASSIGN; state=OFFLINE, location=ee8338ed7cc0,35185,1732101546666; forceNewPlan=false, retain=false 2024-11-20T11:19:37,618 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=8dd1f041f5ff83d363163edeec4cd720, regionState=OPENING, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:37,620 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized 
subprocedures=[{pid=43, ppid=42, state=RUNNABLE; OpenRegionProcedure 8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666}] 2024-11-20T11:19:37,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-20T11:19:37,772 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:37,775 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:37,775 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(7285): Opening region: {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} 2024-11-20T11:19:37,776 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:37,776 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T11:19:37,776 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(7327): checking encryption for 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:37,776 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(7330): checking classloading for 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:37,777 INFO [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:37,779 INFO [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T11:19:37,779 INFO [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8dd1f041f5ff83d363163edeec4cd720 columnFamilyName A 2024-11-20T11:19:37,779 DEBUG [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:37,780 INFO [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] regionserver.HStore(327): Store=8dd1f041f5ff83d363163edeec4cd720/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:19:37,780 INFO [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:37,781 INFO [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T11:19:37,781 INFO [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8dd1f041f5ff83d363163edeec4cd720 columnFamilyName B 2024-11-20T11:19:37,781 DEBUG [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:37,782 INFO [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] regionserver.HStore(327): Store=8dd1f041f5ff83d363163edeec4cd720/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:19:37,782 INFO [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:37,783 INFO [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T11:19:37,783 INFO [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8dd1f041f5ff83d363163edeec4cd720 columnFamilyName C 2024-11-20T11:19:37,783 DEBUG [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:37,784 INFO [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] regionserver.HStore(327): Store=8dd1f041f5ff83d363163edeec4cd720/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:19:37,784 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:37,784 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:37,785 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:37,786 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-20T11:19:37,787 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1085): writing seq id for 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:37,789 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T11:19:37,789 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1102): Opened 8dd1f041f5ff83d363163edeec4cd720; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62855207, jitterRate=-0.06338442862033844}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T11:19:37,790 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1001): Region open journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:37,791 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720., pid=43, masterSystemTime=1732101577772 2024-11-20T11:19:37,792 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:37,792 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
2024-11-20T11:19:37,793 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=8dd1f041f5ff83d363163edeec4cd720, regionState=OPEN, openSeqNum=2, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:37,795 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=42 2024-11-20T11:19:37,795 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=42, state=SUCCESS; OpenRegionProcedure 8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 in 174 msec 2024-11-20T11:19:37,797 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=41 2024-11-20T11:19:37,797 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=8dd1f041f5ff83d363163edeec4cd720, ASSIGN in 329 msec 2024-11-20T11:19:37,798 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T11:19:37,798 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732101577798"}]},"ts":"1732101577798"} 2024-11-20T11:19:37,799 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T11:19:37,802 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T11:19:37,803 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1640 sec 2024-11-20T11:19:38,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-20T11:19:38,746 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 41 completed 2024-11-20T11:19:38,749 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7a9b9802 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@118b007e 2024-11-20T11:19:38,753 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d29de25, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:19:38,755 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:19:38,757 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54562, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:19:38,759 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T11:19:38,760 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34274, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T11:19:38,767 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T11:19:38,767 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T11:19:38,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=44, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-20T11:19:38,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741952_1128 (size=996) 2024-11-20T11:19:39,188 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-20T11:19:39,188 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-20T11:19:39,192 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=44, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T11:19:39,203 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8dd1f041f5ff83d363163edeec4cd720, REOPEN/MOVE}] 2024-11-20T11:19:39,203 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8dd1f041f5ff83d363163edeec4cd720, REOPEN/MOVE 2024-11-20T11:19:39,204 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=8dd1f041f5ff83d363163edeec4cd720, regionState=CLOSING, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:39,205 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T11:19:39,205 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=47, ppid=46, state=RUNNABLE; CloseRegionProcedure 8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666}] 2024-11-20T11:19:39,357 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:39,357 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] handler.UnassignRegionHandler(124): Close 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:39,357 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T11:19:39,358 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1681): Closing 8dd1f041f5ff83d363163edeec4cd720, disabling compactions & flushes 2024-11-20T11:19:39,358 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:39,358 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:39,358 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. after waiting 0 ms 2024-11-20T11:19:39,358 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
2024-11-20T11:19:39,363 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-20T11:19:39,363 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:39,363 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1635): Region close journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:39,363 WARN [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegionServer(3786): Not adding moved region record: 8dd1f041f5ff83d363163edeec4cd720 to self. 2024-11-20T11:19:39,365 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] handler.UnassignRegionHandler(170): Closed 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:39,365 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=8dd1f041f5ff83d363163edeec4cd720, regionState=CLOSED 2024-11-20T11:19:39,368 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=47, resume processing ppid=46 2024-11-20T11:19:39,368 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, ppid=46, state=SUCCESS; CloseRegionProcedure 8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 in 162 msec 2024-11-20T11:19:39,369 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=8dd1f041f5ff83d363163edeec4cd720, REOPEN/MOVE; state=CLOSED, location=ee8338ed7cc0,35185,1732101546666; forceNewPlan=false, retain=true 2024-11-20T11:19:39,519 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=8dd1f041f5ff83d363163edeec4cd720, regionState=OPENING, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:39,521 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=46, state=RUNNABLE; OpenRegionProcedure 8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666}] 2024-11-20T11:19:39,672 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:39,675 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
2024-11-20T11:19:39,676 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7285): Opening region: {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} 2024-11-20T11:19:39,676 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:39,676 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T11:19:39,676 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7327): checking encryption for 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:39,676 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7330): checking classloading for 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:39,679 INFO [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:39,679 INFO [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T11:19:39,684 INFO [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8dd1f041f5ff83d363163edeec4cd720 columnFamilyName A 2024-11-20T11:19:39,686 DEBUG [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:39,687 INFO [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] regionserver.HStore(327): Store=8dd1f041f5ff83d363163edeec4cd720/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:19:39,687 INFO [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:39,688 INFO [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T11:19:39,688 INFO [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8dd1f041f5ff83d363163edeec4cd720 columnFamilyName B 2024-11-20T11:19:39,688 DEBUG [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:39,689 INFO [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] regionserver.HStore(327): Store=8dd1f041f5ff83d363163edeec4cd720/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:19:39,689 INFO [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:39,689 INFO [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T11:19:39,690 INFO [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8dd1f041f5ff83d363163edeec4cd720 columnFamilyName C 2024-11-20T11:19:39,690 DEBUG [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:39,690 INFO [StoreOpener-8dd1f041f5ff83d363163edeec4cd720-1 {}] regionserver.HStore(327): Store=8dd1f041f5ff83d363163edeec4cd720/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:19:39,690 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:39,691 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:39,692 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:39,693 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T11:19:39,694 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1085): writing seq id for 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:39,695 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1102): Opened 8dd1f041f5ff83d363163edeec4cd720; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64903951, jitterRate=-0.03285576403141022}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T11:19:39,696 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1001): Region open journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:39,697 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720., pid=48, masterSystemTime=1732101579672 2024-11-20T11:19:39,699 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:39,699 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
2024-11-20T11:19:39,699 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=8dd1f041f5ff83d363163edeec4cd720, regionState=OPEN, openSeqNum=5, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:39,702 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=46 2024-11-20T11:19:39,702 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=46, state=SUCCESS; OpenRegionProcedure 8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 in 180 msec 2024-11-20T11:19:39,704 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-11-20T11:19:39,704 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=8dd1f041f5ff83d363163edeec4cd720, REOPEN/MOVE in 499 msec 2024-11-20T11:19:39,707 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=44 2024-11-20T11:19:39,707 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=44, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 514 msec 2024-11-20T11:19:39,709 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 938 msec 2024-11-20T11:19:39,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=44 2024-11-20T11:19:39,718 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7cae6c5c to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79982672 2024-11-20T11:19:39,725 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@433e2b26, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:19:39,726 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5c820ef9 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7b4bd1ba 2024-11-20T11:19:39,729 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@176c5c1b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:19:39,731 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0b44b1e5 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@454f1431 2024-11-20T11:19:39,733 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@190853fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:19:39,735 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x42e904d8 to 
127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@505d5ccd 2024-11-20T11:19:39,737 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c5c4716, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:19:39,738 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0a4c53ed to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@367f47f7 2024-11-20T11:19:39,742 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2885d2d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:19:39,743 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x247c0c93 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@22e911df 2024-11-20T11:19:39,746 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78cafade, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:19:39,747 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x517ff977 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3b727d6e 2024-11-20T11:19:39,753 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14c16cd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:19:39,754 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3448d233 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1c7940d9 2024-11-20T11:19:39,757 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@341384e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:19:39,758 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7a11164b to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c38ee58 2024-11-20T11:19:39,761 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26b120d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:19:39,765 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:19:39,765 DEBUG [hconnection-0x1adbc80d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:19:39,766 DEBUG [hconnection-0x2696aa23-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:19:39,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-11-20T11:19:39,767 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:19:39,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T11:19:39,767 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54574, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:19:39,768 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:19:39,768 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:19:39,768 DEBUG [hconnection-0x2ac3e101-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:19:39,769 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54578, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:19:39,770 DEBUG [hconnection-0x3dfb4f96-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:19:39,771 DEBUG [hconnection-0x4491bf73-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:19:39,771 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54582, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:19:39,771 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54588, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:19:39,772 DEBUG [hconnection-0x27a3616c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:19:39,772 DEBUG [hconnection-0x3a83afe5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:19:39,772 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54604, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-11-20T11:19:39,773 DEBUG [hconnection-0x6b4524be-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:19:39,773 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54606, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:19:39,773 DEBUG [hconnection-0x40fddafc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:19:39,773 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54608, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:19:39,774 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54624, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:19:39,782 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54634, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:19:39,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:39,786 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8dd1f041f5ff83d363163edeec4cd720 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T11:19:39,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=A 2024-11-20T11:19:39,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:39,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=B 2024-11-20T11:19:39,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:39,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=C 2024-11-20T11:19:39,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:39,826 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120efb3cc56fc97425da0bff447718a395d_8dd1f041f5ff83d363163edeec4cd720 is 50, key is test_row_0/A:col10/1732101579786/Put/seqid=0 2024-11-20T11:19:39,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741953_1129 (size=12154) 2024-11-20T11:19:39,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T11:19:39,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:39,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101639872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:39,883 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:39,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101639873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:39,887 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:39,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101639882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:39,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:39,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101639883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:39,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:39,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101639884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:39,920 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:39,920 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T11:19:39,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:39,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. as already flushing 2024-11-20T11:19:39,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:39,921 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:19:39,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:39,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:39,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:39,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101639984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:39,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:39,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101639984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:39,993 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:39,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101639990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:39,993 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:39,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101639990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:39,994 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:39,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101639990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:40,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T11:19:40,074 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:40,076 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T11:19:40,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:40,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. as already flushing 2024-11-20T11:19:40,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:40,076 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:19:40,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:40,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:40,191 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:40,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101640190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:40,191 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:40,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101640190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:40,196 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:40,197 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:40,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101640197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:40,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101640195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:40,197 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:40,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101640197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:40,230 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:40,230 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T11:19:40,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:40,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. as already flushing 2024-11-20T11:19:40,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:40,231 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:19:40,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:40,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T11:19:40,232 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T11:19:40,238 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120efb3cc56fc97425da0bff447718a395d_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120efb3cc56fc97425da0bff447718a395d_8dd1f041f5ff83d363163edeec4cd720
2024-11-20T11:19:40,240 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/e956edf7d8a5449a8b1cddb021caebc9, store: [table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720]
2024-11-20T11:19:40,251 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/e956edf7d8a5449a8b1cddb021caebc9 is 175, key is test_row_0/A:col10/1732101579786/Put/seqid=0
2024-11-20T11:19:40,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741954_1130 (size=30955)
2024-11-20T11:19:40,292 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/e956edf7d8a5449a8b1cddb021caebc9
2024-11-20T11:19:40,330 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/adc9386efdb14a86b0555a1bd05aa396 is 50, key is test_row_0/B:col10/1732101579786/Put/seqid=0
2024-11-20T11:19:40,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741955_1131 (size=12001)
2024-11-20T11:19:40,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49
2024-11-20T11:19:40,383 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666
2024-11-20T11:19:40,384 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50
2024-11-20T11:19:40,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation
on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:40,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. as already flushing 2024-11-20T11:19:40,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:40,385 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:40,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:19:40,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:40,495 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:40,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101640495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:40,496 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:40,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101640495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:40,499 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:40,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101640499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:40,501 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:40,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101640500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:40,501 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:40,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101640501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:40,537 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:40,538 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T11:19:40,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:40,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. as already flushing 2024-11-20T11:19:40,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:40,539 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:40,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:40,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:40,691 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:40,692 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T11:19:40,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:40,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. as already flushing 2024-11-20T11:19:40,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:40,693 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:40,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:40,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T11:19:40,738 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-20T11:19:40,739 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34288, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService
2024-11-20T11:19:40,755 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/adc9386efdb14a86b0555a1bd05aa396
2024-11-20T11:19:40,782 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/000ef89b88f0435ea6ddeadeb3e24b58 is 50, key is test_row_0/C:col10/1732101579786/Put/seqid=0
2024-11-20T11:19:40,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741956_1132 (size=12001)
2024-11-20T11:19:40,790 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/000ef89b88f0435ea6ddeadeb3e24b58
2024-11-20T11:19:40,798 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/e956edf7d8a5449a8b1cddb021caebc9 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/e956edf7d8a5449a8b1cddb021caebc9
2024-11-20T11:19:40,807 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/e956edf7d8a5449a8b1cddb021caebc9, entries=150, sequenceid=17, filesize=30.2 K
2024-11-20T11:19:40,808 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/adc9386efdb14a86b0555a1bd05aa396 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/adc9386efdb14a86b0555a1bd05aa396
2024-11-20T11:19:40,814 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/adc9386efdb14a86b0555a1bd05aa396, entries=150, sequenceid=17, filesize=11.7 K
2024-11-20T11:19:40,815 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/000ef89b88f0435ea6ddeadeb3e24b58 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/000ef89b88f0435ea6ddeadeb3e24b58
2024-11-20T11:19:40,823 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/000ef89b88f0435ea6ddeadeb3e24b58, entries=150, sequenceid=17, filesize=11.7 K
2024-11-20T11:19:40,825 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 8dd1f041f5ff83d363163edeec4cd720 in 1039ms, sequenceid=17, compaction requested=false
2024-11-20T11:19:40,825 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8dd1f041f5ff83d363163edeec4cd720:
2024-11-20T11:19:40,846 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666
2024-11-20T11:19:40,846 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50
2024-11-20T11:19:40,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.
2024-11-20T11:19:40,847 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 8dd1f041f5ff83d363163edeec4cd720 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB
2024-11-20T11:19:40,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=A
2024-11-20T11:19:40,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T11:19:40,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=B
2024-11-20T11:19:40,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T11:19:40,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=C
2024-11-20T11:19:40,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T11:19:40,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204e505d2e93f64f02b613167d7659201e_8dd1f041f5ff83d363163edeec4cd720 is 50, key is test_row_0/A:col10/1732101579879/Put/seqid=0
2024-11-20T11:19:40,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741957_1133 (size=12154)
2024-11-20T11:19:40,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49
2024-11-20T11:19:40,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 8dd1f041f5ff83d363163edeec4cd720
2024-11-20T11:19:40,999 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. as already flushing
2024-11-20T11:19:41,012 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:41,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101641008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:41,012 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:41,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101641009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:41,012 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:41,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101641010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:41,014 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:41,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101641012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:41,015 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:41,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101641013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:41,114 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:41,114 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:41,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101641114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:41,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101641114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:41,120 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:41,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101641117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:41,120 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:41,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101641117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:41,120 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:41,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101641117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:41,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:41,284 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204e505d2e93f64f02b613167d7659201e_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204e505d2e93f64f02b613167d7659201e_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:41,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/1123abe981d6444f855e5e68cc364054, store: [table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:41,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/1123abe981d6444f855e5e68cc364054 is 175, key is test_row_0/A:col10/1732101579879/Put/seqid=0 2024-11-20T11:19:41,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741958_1134 (size=30955) 2024-11-20T11:19:41,316 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:41,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101641316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:41,316 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:41,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101641316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:41,322 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:41,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101641322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:41,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:41,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101641322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:41,325 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:41,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101641323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:41,620 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:41,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101641618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:41,621 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:41,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101641620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:41,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:41,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101641625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:41,627 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:41,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101641626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:41,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:41,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101641628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:41,692 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/1123abe981d6444f855e5e68cc364054 2024-11-20T11:19:41,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/85b1b37f41b74af584463f6ba6cbb0e6 is 50, key is test_row_0/B:col10/1732101579879/Put/seqid=0 2024-11-20T11:19:41,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741959_1135 (size=12001) 2024-11-20T11:19:41,721 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/85b1b37f41b74af584463f6ba6cbb0e6 2024-11-20T11:19:41,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/8eba5d8880ea41c4a929c3c6d21794d9 is 50, key is test_row_0/C:col10/1732101579879/Put/seqid=0 2024-11-20T11:19:41,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741960_1136 (size=12001) 2024-11-20T11:19:41,739 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/8eba5d8880ea41c4a929c3c6d21794d9 2024-11-20T11:19:41,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/1123abe981d6444f855e5e68cc364054 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/1123abe981d6444f855e5e68cc364054 2024-11-20T11:19:41,751 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/1123abe981d6444f855e5e68cc364054, entries=150, sequenceid=41, filesize=30.2 K 2024-11-20T11:19:41,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/85b1b37f41b74af584463f6ba6cbb0e6 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/85b1b37f41b74af584463f6ba6cbb0e6 2024-11-20T11:19:41,757 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/85b1b37f41b74af584463f6ba6cbb0e6, entries=150, sequenceid=41, filesize=11.7 K 2024-11-20T11:19:41,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/8eba5d8880ea41c4a929c3c6d21794d9 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/8eba5d8880ea41c4a929c3c6d21794d9 2024-11-20T11:19:41,764 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/8eba5d8880ea41c4a929c3c6d21794d9, entries=150, sequenceid=41, filesize=11.7 K 2024-11-20T11:19:41,765 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 8dd1f041f5ff83d363163edeec4cd720 in 918ms, sequenceid=41, compaction requested=false 2024-11-20T11:19:41,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:41,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
2024-11-20T11:19:41,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-11-20T11:19:41,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-11-20T11:19:41,768 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-11-20T11:19:41,768 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9980 sec 2024-11-20T11:19:41,770 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 2.0040 sec 2024-11-20T11:19:41,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T11:19:41,885 INFO [Thread-645 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-11-20T11:19:41,886 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:19:41,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-11-20T11:19:41,888 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:19:41,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T11:19:41,889 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:19:41,889 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:19:41,975 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T11:19:41,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T11:19:42,041 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:42,043 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T11:19:42,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:42,043 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 8dd1f041f5ff83d363163edeec4cd720 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T11:19:42,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=A 2024-11-20T11:19:42,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:42,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=B 2024-11-20T11:19:42,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:42,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=C 2024-11-20T11:19:42,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:42,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112055d3e8a45266494e8f71168146e2356c_8dd1f041f5ff83d363163edeec4cd720 is 50, key is test_row_0/A:col10/1732101581009/Put/seqid=0 2024-11-20T11:19:42,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741961_1137 (size=12154) 2024-11-20T11:19:42,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:42,127 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. as already flushing 2024-11-20T11:19:42,151 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:42,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101642148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:42,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:42,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101642150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:42,155 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:42,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101642150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:42,155 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:42,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101642151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:42,156 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:42,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101642151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:42,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T11:19:42,253 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:42,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101642252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:42,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:42,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101642253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:42,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:42,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101642256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:42,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:42,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101642257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:42,260 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:42,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101642258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:42,458 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:42,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101642455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:42,458 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:42,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101642457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:42,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:42,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101642459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:42,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:42,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101642460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:42,465 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:42,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101642462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:42,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:42,478 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112055d3e8a45266494e8f71168146e2356c_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112055d3e8a45266494e8f71168146e2356c_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:42,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/561da633798f4b67a54f493e82216a9c, store: [table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:42,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/561da633798f4b67a54f493e82216a9c is 175, key is test_row_0/A:col10/1732101581009/Put/seqid=0 2024-11-20T11:19:42,485 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741962_1138 (size=30955) 2024-11-20T11:19:42,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T11:19:42,760 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:42,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101642759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:42,763 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:42,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101642761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:42,766 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:42,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101642763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:42,766 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:42,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101642763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:42,769 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:42,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101642768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:42,885 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/561da633798f4b67a54f493e82216a9c 2024-11-20T11:19:42,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/c6c2f0be4eed45f8aea82fa8f5e7e394 is 50, key is test_row_0/B:col10/1732101581009/Put/seqid=0 2024-11-20T11:19:42,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741963_1139 (size=12001) 2024-11-20T11:19:42,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T11:19:43,263 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:43,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101643262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:43,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:43,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101643267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:43,273 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:43,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101643268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:43,273 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:43,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101643271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:43,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:43,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101643273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:43,300 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/c6c2f0be4eed45f8aea82fa8f5e7e394 2024-11-20T11:19:43,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/2bc86f637bd84bc38a87c3886bcef2cd is 50, key is test_row_0/C:col10/1732101581009/Put/seqid=0 2024-11-20T11:19:43,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741964_1140 (size=12001) 2024-11-20T11:19:43,316 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/2bc86f637bd84bc38a87c3886bcef2cd 2024-11-20T11:19:43,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/561da633798f4b67a54f493e82216a9c as 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/561da633798f4b67a54f493e82216a9c 2024-11-20T11:19:43,327 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/561da633798f4b67a54f493e82216a9c, entries=150, sequenceid=54, filesize=30.2 K 2024-11-20T11:19:43,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/c6c2f0be4eed45f8aea82fa8f5e7e394 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/c6c2f0be4eed45f8aea82fa8f5e7e394 2024-11-20T11:19:43,335 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/c6c2f0be4eed45f8aea82fa8f5e7e394, entries=150, sequenceid=54, filesize=11.7 K 2024-11-20T11:19:43,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/2bc86f637bd84bc38a87c3886bcef2cd as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/2bc86f637bd84bc38a87c3886bcef2cd 2024-11-20T11:19:43,341 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/2bc86f637bd84bc38a87c3886bcef2cd, entries=150, sequenceid=54, filesize=11.7 K 2024-11-20T11:19:43,342 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 8dd1f041f5ff83d363163edeec4cd720 in 1299ms, sequenceid=54, compaction requested=true 2024-11-20T11:19:43,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:43,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
2024-11-20T11:19:43,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-11-20T11:19:43,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-11-20T11:19:43,345 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-11-20T11:19:43,345 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4550 sec 2024-11-20T11:19:43,347 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 1.4600 sec 2024-11-20T11:19:43,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T11:19:43,993 INFO [Thread-645 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-11-20T11:19:43,994 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:19:43,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-11-20T11:19:43,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T11:19:43,995 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:19:43,996 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:19:43,996 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:19:44,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T11:19:44,148 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:44,148 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-20T11:19:44,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
2024-11-20T11:19:44,149 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 8dd1f041f5ff83d363163edeec4cd720 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T11:19:44,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=A 2024-11-20T11:19:44,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:44,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=B 2024-11-20T11:19:44,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:44,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=C 2024-11-20T11:19:44,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:44,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c9b601d3376f47ebb8cbe9ad0d34fb55_8dd1f041f5ff83d363163edeec4cd720 is 50, key is test_row_0/A:col10/1732101582138/Put/seqid=0 2024-11-20T11:19:44,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741965_1141 (size=12154) 2024-11-20T11:19:44,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:44,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. as already flushing 2024-11-20T11:19:44,288 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:44,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101644285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:44,288 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:44,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101644285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:44,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:44,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101644286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:44,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:44,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101644287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:44,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:44,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101644288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:44,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T11:19:44,391 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:44,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101644389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:44,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:44,391 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:44,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101644390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:44,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101644390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:44,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:44,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101644391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:44,392 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:44,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101644391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:44,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:44,583 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c9b601d3376f47ebb8cbe9ad0d34fb55_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c9b601d3376f47ebb8cbe9ad0d34fb55_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:44,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/9ce419c2a2144ee38cbfc1981896cf9f, store: [table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:44,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/9ce419c2a2144ee38cbfc1981896cf9f is 175, key is test_row_0/A:col10/1732101582138/Put/seqid=0 2024-11-20T11:19:44,589 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741966_1142 (size=30955) 2024-11-20T11:19:44,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:44,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101644592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:44,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:44,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101644592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:44,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:44,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101644593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:44,595 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:44,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101644593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:44,595 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:44,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101644593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:44,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T11:19:44,894 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:44,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101644894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:44,901 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:44,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101644895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:44,901 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:44,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101644895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:44,901 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:44,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101644896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:44,901 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:44,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101644898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:44,990 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=77, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/9ce419c2a2144ee38cbfc1981896cf9f 2024-11-20T11:19:45,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/5f2e6fb8676a4a9bbec526def9928c36 is 50, key is test_row_0/B:col10/1732101582138/Put/seqid=0 2024-11-20T11:19:45,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741967_1143 (size=12001) 2024-11-20T11:19:45,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T11:19:45,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:45,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101645398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:45,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:45,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101645402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:45,405 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:45,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101645403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:45,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:45,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101645406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:45,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:45,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101645406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:45,412 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/5f2e6fb8676a4a9bbec526def9928c36 2024-11-20T11:19:45,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/5dda63f6774c47b6a8af8d0e3be1f579 is 50, key is test_row_0/C:col10/1732101582138/Put/seqid=0 2024-11-20T11:19:45,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741968_1144 (size=12001) 2024-11-20T11:19:45,832 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/5dda63f6774c47b6a8af8d0e3be1f579 2024-11-20T11:19:45,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/9ce419c2a2144ee38cbfc1981896cf9f as 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/9ce419c2a2144ee38cbfc1981896cf9f 2024-11-20T11:19:45,844 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/9ce419c2a2144ee38cbfc1981896cf9f, entries=150, sequenceid=77, filesize=30.2 K 2024-11-20T11:19:45,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/5f2e6fb8676a4a9bbec526def9928c36 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/5f2e6fb8676a4a9bbec526def9928c36 2024-11-20T11:19:45,850 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/5f2e6fb8676a4a9bbec526def9928c36, entries=150, sequenceid=77, filesize=11.7 K 2024-11-20T11:19:45,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/5dda63f6774c47b6a8af8d0e3be1f579 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/5dda63f6774c47b6a8af8d0e3be1f579 2024-11-20T11:19:45,856 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/5dda63f6774c47b6a8af8d0e3be1f579, entries=150, sequenceid=77, filesize=11.7 K 2024-11-20T11:19:45,857 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 8dd1f041f5ff83d363163edeec4cd720 in 1707ms, sequenceid=77, compaction requested=true 2024-11-20T11:19:45,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:45,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
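The repeated WARN/DEBUG pairs above record Mutate RPCs being rejected with RegionTooBusyException because region 8dd1f041f5ff83d363163edeec4cd720's memstore is over its 512.0 K blocking limit while the pid=54 flush is still in progress. The stock HBase client already retries these rejections internally; the following is only a hedged sketch of an explicit back-off loop, assuming the standard 2.x client API, the table/column-family/row names visible in this log, a made-up value, and that the exception reaches the caller directly rather than wrapped by the client's own retry layer:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row/family/qualifier mirror the keys seen in the flush log; the value is illustrative.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break; // write accepted once the memstore drops back under the blocking limit
        } catch (RegionTooBusyException e) {
          // "Over memstore limit" rejection, as in the WARN entries above; back off and retry.
          if (attempt >= 10) {
            throw e; // give up after a bounded number of attempts
          }
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000L); // exponential backoff, capped at 5 s
        }
      }
    }
  }
}
```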
2024-11-20T11:19:45,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-20T11:19:45,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-11-20T11:19:45,860 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-11-20T11:19:45,860 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8620 sec 2024-11-20T11:19:45,862 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 1.8670 sec 2024-11-20T11:19:46,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T11:19:46,100 INFO [Thread-645 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-11-20T11:19:46,101 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:19:46,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-11-20T11:19:46,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T11:19:46,103 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:19:46,104 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:19:46,104 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:19:46,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T11:19:46,255 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:46,256 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T11:19:46,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
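The "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" entries and the FlushTableProcedure (pid=53, then pid=55) with their FlushRegionProcedure subprocedures (pid=54, pid=56) correspond to admin-requested flushes; HBaseAdmin$TableFuture keeps polling "Checking to see if procedure is done" until the master reports the procId complete. A minimal sketch of issuing the same flush through the public Admin API (table name taken from the log; connection details assumed):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side flush procedure finishes, i.e. until each region's
      // memstores (stores A/B/C in this test) have been written out and committed as HFiles.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```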
2024-11-20T11:19:46,256 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing 8dd1f041f5ff83d363163edeec4cd720 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T11:19:46,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=A 2024-11-20T11:19:46,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:46,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=B 2024-11-20T11:19:46,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:46,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=C 2024-11-20T11:19:46,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:46,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120482084ec32194f9b9c66e0c023246362_8dd1f041f5ff83d363163edeec4cd720 is 50, key is test_row_0/A:col10/1732101584284/Put/seqid=0 2024-11-20T11:19:46,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741969_1145 (size=12154) 2024-11-20T11:19:46,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T11:19:46,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:46,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. as already flushing 2024-11-20T11:19:46,416 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T11:19:46,416 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-20T11:19:46,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:46,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101646426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:46,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:46,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101646429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:46,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:46,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101646459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:46,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:46,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101646459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:46,463 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:46,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101646459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:46,562 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:46,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101646560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:46,563 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:46,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101646560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:46,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:46,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:46,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101646563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:46,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101646563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:46,566 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:46,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101646564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:46,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:46,676 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120482084ec32194f9b9c66e0c023246362_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120482084ec32194f9b9c66e0c023246362_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:46,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/4a57a0176ba745caa9150305279d6520, store: [table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:46,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/4a57a0176ba745caa9150305279d6520 is 175, key is test_row_0/A:col10/1732101584284/Put/seqid=0 2024-11-20T11:19:46,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741970_1146 (size=30955) 2024-11-20T11:19:46,684 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=90, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/4a57a0176ba745caa9150305279d6520 2024-11-20T11:19:46,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/902cbe220911469c893eebe9d8b232f1 is 50, key is test_row_0/B:col10/1732101584284/Put/seqid=0 2024-11-20T11:19:46,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741971_1147 (size=12001) 2024-11-20T11:19:46,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T11:19:46,765 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:46,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101646764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:46,765 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:46,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101646764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:46,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:46,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101646767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:46,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:46,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101646767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:46,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:46,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101646768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:47,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:47,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101647067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:47,069 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:47,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101647068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:47,073 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:47,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101647071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:47,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:47,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101647072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:47,076 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:47,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101647074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:47,096 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/902cbe220911469c893eebe9d8b232f1 2024-11-20T11:19:47,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/326de11523dc4d45b88f5c2e8817a25d is 50, key is test_row_0/C:col10/1732101584284/Put/seqid=0 2024-11-20T11:19:47,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741972_1148 (size=12001) 2024-11-20T11:19:47,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T11:19:47,510 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/326de11523dc4d45b88f5c2e8817a25d 2024-11-20T11:19:47,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/4a57a0176ba745caa9150305279d6520 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/4a57a0176ba745caa9150305279d6520 2024-11-20T11:19:47,521 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/4a57a0176ba745caa9150305279d6520, entries=150, sequenceid=90, filesize=30.2 K 2024-11-20T11:19:47,522 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/902cbe220911469c893eebe9d8b232f1 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/902cbe220911469c893eebe9d8b232f1 2024-11-20T11:19:47,527 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/902cbe220911469c893eebe9d8b232f1, entries=150, sequenceid=90, filesize=11.7 K 2024-11-20T11:19:47,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/326de11523dc4d45b88f5c2e8817a25d as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/326de11523dc4d45b88f5c2e8817a25d 2024-11-20T11:19:47,533 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/326de11523dc4d45b88f5c2e8817a25d, entries=150, sequenceid=90, filesize=11.7 K 2024-11-20T11:19:47,533 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=147.60 KB/151140 for 8dd1f041f5ff83d363163edeec4cd720 in 1277ms, sequenceid=90, compaction requested=true 2024-11-20T11:19:47,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:47,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
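At this point the flush for pid=56 has committed its A, B and C store files, while the surrounding RegionTooBusyException records show writes being pushed back because the region's memstore is over its 512.0 K blocking limit. That limit is checked in HRegion.checkResources() and is the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; a blocking size as small as 512 K implies the test lowers the defaults. The sketch below shows one assumed combination of values that yields a 512 K threshold; the specific numbers are illustrative and are not read from this log.

// Sketch only: a configuration whose flush size x block multiplier reproduces
// the 512 K blocking threshold reported in the RegionTooBusyException records.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfig {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // 128 K flush size (assumed value)
    conf.setLong("hbase.hregion.memstore.block.multiplier", 4);    // default multiplier
    // 128 K * 4 = 512 K: once a region's memstore exceeds this, puts fail fast
    // with RegionTooBusyException until a flush brings the size back down.
    return conf;
  }
}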
2024-11-20T11:19:47,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-11-20T11:19:47,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-11-20T11:19:47,536 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-11-20T11:19:47,536 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4310 sec 2024-11-20T11:19:47,537 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 1.4360 sec 2024-11-20T11:19:47,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:47,573 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8dd1f041f5ff83d363163edeec4cd720 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T11:19:47,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=A 2024-11-20T11:19:47,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:47,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=B 2024-11-20T11:19:47,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:47,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=C 2024-11-20T11:19:47,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:47,583 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112004a9511653374ffe8e65b95f26d49dc5_8dd1f041f5ff83d363163edeec4cd720 is 50, key is test_row_0/A:col10/1732101586430/Put/seqid=0 2024-11-20T11:19:47,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:47,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101647580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:47,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:47,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101647580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:47,585 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:47,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101647582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:47,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741973_1149 (size=17034) 2024-11-20T11:19:47,589 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:47,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101647584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:47,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:47,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101647584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:47,688 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:47,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101647685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:47,688 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:47,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101647685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:47,688 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:47,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101647686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:47,691 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:47,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101647690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:47,691 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:47,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101647690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:47,892 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:47,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101647890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:47,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:47,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101647890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:47,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:47,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101647890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:47,894 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:47,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101647892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:47,894 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:47,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101647893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:47,989 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:47,994 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112004a9511653374ffe8e65b95f26d49dc5_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112004a9511653374ffe8e65b95f26d49dc5_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:47,995 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/ecb333ba38bf454fae33ab0133cec11e, store: [table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:47,996 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/ecb333ba38bf454fae33ab0133cec11e is 175, key is test_row_0/A:col10/1732101586430/Put/seqid=0 2024-11-20T11:19:48,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741974_1150 (size=48139) 2024-11-20T11:19:48,196 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:48,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101648194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:48,196 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:48,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101648194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:48,197 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:48,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101648195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:48,197 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:48,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101648195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:48,197 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:48,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101648197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:48,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T11:19:48,207 INFO [Thread-645 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-11-20T11:19:48,208 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:19:48,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-11-20T11:19:48,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T11:19:48,210 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:19:48,210 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:19:48,210 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:19:48,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure 
is done pid=57 2024-11-20T11:19:48,362 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:48,363 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T11:19:48,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:48,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. as already flushing 2024-11-20T11:19:48,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:48,363 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:48,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:19:48,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:19:48,401 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=117, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/ecb333ba38bf454fae33ab0133cec11e 2024-11-20T11:19:48,409 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/480c8f02e3ee48bf8977e15d57c3f6dd is 50, key is test_row_0/B:col10/1732101586430/Put/seqid=0 2024-11-20T11:19:48,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741975_1151 (size=12001) 2024-11-20T11:19:48,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T11:19:48,515 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:48,516 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T11:19:48,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:48,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. as already flushing 2024-11-20T11:19:48,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:48,516 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:19:48,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:48,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:48,668 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:48,669 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T11:19:48,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:48,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. as already flushing 2024-11-20T11:19:48,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:48,669 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:48,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:48,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:48,699 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:48,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:48,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101648699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:48,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101648698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:48,700 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:48,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101648700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:48,702 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:48,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101648701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:48,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:48,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101648702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:48,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T11:19:48,814 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/480c8f02e3ee48bf8977e15d57c3f6dd 2024-11-20T11:19:48,822 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:48,822 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T11:19:48,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:48,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. as already flushing 2024-11-20T11:19:48,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
2024-11-20T11:19:48,823 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:48,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:48,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:48,824 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/31ee1cbc4ddb4e438f60e0f0462eacbd is 50, key is test_row_0/C:col10/1732101586430/Put/seqid=0 2024-11-20T11:19:48,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741976_1152 (size=12001) 2024-11-20T11:19:48,829 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/31ee1cbc4ddb4e438f60e0f0462eacbd 2024-11-20T11:19:48,835 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/ecb333ba38bf454fae33ab0133cec11e as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/ecb333ba38bf454fae33ab0133cec11e 2024-11-20T11:19:48,843 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/ecb333ba38bf454fae33ab0133cec11e, entries=250, sequenceid=117, filesize=47.0 K 2024-11-20T11:19:48,844 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/480c8f02e3ee48bf8977e15d57c3f6dd as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/480c8f02e3ee48bf8977e15d57c3f6dd 2024-11-20T11:19:48,850 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/480c8f02e3ee48bf8977e15d57c3f6dd, entries=150, sequenceid=117, filesize=11.7 K 2024-11-20T11:19:48,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/31ee1cbc4ddb4e438f60e0f0462eacbd as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/31ee1cbc4ddb4e438f60e0f0462eacbd 2024-11-20T11:19:48,859 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/31ee1cbc4ddb4e438f60e0f0462eacbd, entries=150, sequenceid=117, filesize=11.7 K 2024-11-20T11:19:48,860 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 8dd1f041f5ff83d363163edeec4cd720 in 1287ms, sequenceid=117, compaction requested=true 2024-11-20T11:19:48,861 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:48,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dd1f041f5ff83d363163edeec4cd720:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:19:48,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:48,861 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-20T11:19:48,861 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-20T11:19:48,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dd1f041f5ff83d363163edeec4cd720:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:19:48,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:48,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dd1f041f5ff83d363163edeec4cd720:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:19:48,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:19:48,863 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 72006 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-20T11:19:48,864 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 8dd1f041f5ff83d363163edeec4cd720/B is initiating minor compaction (all files) 2024-11-20T11:19:48,864 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dd1f041f5ff83d363163edeec4cd720/B in TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:48,864 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/adc9386efdb14a86b0555a1bd05aa396, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/85b1b37f41b74af584463f6ba6cbb0e6, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/c6c2f0be4eed45f8aea82fa8f5e7e394, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/5f2e6fb8676a4a9bbec526def9928c36, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/902cbe220911469c893eebe9d8b232f1, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/480c8f02e3ee48bf8977e15d57c3f6dd] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp, totalSize=70.3 K 2024-11-20T11:19:48,864 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 202914 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-20T11:19:48,864 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 8dd1f041f5ff83d363163edeec4cd720/A is initiating minor compaction (all files) 2024-11-20T11:19:48,865 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dd1f041f5ff83d363163edeec4cd720/A in TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
2024-11-20T11:19:48,865 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/e956edf7d8a5449a8b1cddb021caebc9, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/1123abe981d6444f855e5e68cc364054, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/561da633798f4b67a54f493e82216a9c, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/9ce419c2a2144ee38cbfc1981896cf9f, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/4a57a0176ba745caa9150305279d6520, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/ecb333ba38bf454fae33ab0133cec11e] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp, totalSize=198.2 K 2024-11-20T11:19:48,865 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=10 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:48,865 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
files: [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/e956edf7d8a5449a8b1cddb021caebc9, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/1123abe981d6444f855e5e68cc364054, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/561da633798f4b67a54f493e82216a9c, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/9ce419c2a2144ee38cbfc1981896cf9f, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/4a57a0176ba745caa9150305279d6520, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/ecb333ba38bf454fae33ab0133cec11e] 2024-11-20T11:19:48,866 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting e956edf7d8a5449a8b1cddb021caebc9, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732101579776 2024-11-20T11:19:48,866 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting adc9386efdb14a86b0555a1bd05aa396, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732101579776 2024-11-20T11:19:48,867 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1123abe981d6444f855e5e68cc364054, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732101579879 2024-11-20T11:19:48,867 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 85b1b37f41b74af584463f6ba6cbb0e6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732101579879 2024-11-20T11:19:48,867 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 561da633798f4b67a54f493e82216a9c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732101581008 2024-11-20T11:19:48,867 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting c6c2f0be4eed45f8aea82fa8f5e7e394, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732101581008 2024-11-20T11:19:48,868 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9ce419c2a2144ee38cbfc1981896cf9f, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732101582138 2024-11-20T11:19:48,868 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f2e6fb8676a4a9bbec526def9928c36, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732101582138 2024-11-20T11:19:48,869 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a57a0176ba745caa9150305279d6520, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732101584284 2024-11-20T11:19:48,869 DEBUG 
[RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 902cbe220911469c893eebe9d8b232f1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732101584284 2024-11-20T11:19:48,869 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting ecb333ba38bf454fae33ab0133cec11e, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732101586424 2024-11-20T11:19:48,869 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 480c8f02e3ee48bf8977e15d57c3f6dd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732101586424 2024-11-20T11:19:48,894 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dd1f041f5ff83d363163edeec4cd720#B#compaction#129 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:48,895 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/e454e56b2aeb439d903173a3d52c685e is 50, key is test_row_0/B:col10/1732101586430/Put/seqid=0 2024-11-20T11:19:48,901 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:48,919 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120f5afa73a0d22467c975b9e36eb8e81ca_8dd1f041f5ff83d363163edeec4cd720 store=[table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:48,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741977_1153 (size=12207) 2024-11-20T11:19:48,929 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120f5afa73a0d22467c975b9e36eb8e81ca_8dd1f041f5ff83d363163edeec4cd720, store=[table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:48,929 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f5afa73a0d22467c975b9e36eb8e81ca_8dd1f041f5ff83d363163edeec4cd720 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:48,930 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/e454e56b2aeb439d903173a3d52c685e as 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/e454e56b2aeb439d903173a3d52c685e 2024-11-20T11:19:48,939 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 8dd1f041f5ff83d363163edeec4cd720/B of 8dd1f041f5ff83d363163edeec4cd720 into e454e56b2aeb439d903173a3d52c685e(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:19:48,939 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:48,939 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720., storeName=8dd1f041f5ff83d363163edeec4cd720/B, priority=10, startTime=1732101588861; duration=0sec 2024-11-20T11:19:48,939 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:19:48,939 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dd1f041f5ff83d363163edeec4cd720:B 2024-11-20T11:19:48,939 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-20T11:19:48,942 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 72006 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-20T11:19:48,942 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 8dd1f041f5ff83d363163edeec4cd720/C is initiating minor compaction (all files) 2024-11-20T11:19:48,942 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dd1f041f5ff83d363163edeec4cd720/C in TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
2024-11-20T11:19:48,942 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/000ef89b88f0435ea6ddeadeb3e24b58, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/8eba5d8880ea41c4a929c3c6d21794d9, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/2bc86f637bd84bc38a87c3886bcef2cd, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/5dda63f6774c47b6a8af8d0e3be1f579, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/326de11523dc4d45b88f5c2e8817a25d, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/31ee1cbc4ddb4e438f60e0f0462eacbd] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp, totalSize=70.3 K 2024-11-20T11:19:48,943 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 000ef89b88f0435ea6ddeadeb3e24b58, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732101579776 2024-11-20T11:19:48,943 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 8eba5d8880ea41c4a929c3c6d21794d9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732101579879 2024-11-20T11:19:48,944 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 2bc86f637bd84bc38a87c3886bcef2cd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732101581008 2024-11-20T11:19:48,944 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 5dda63f6774c47b6a8af8d0e3be1f579, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732101582138 2024-11-20T11:19:48,945 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 326de11523dc4d45b88f5c2e8817a25d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732101584284 2024-11-20T11:19:48,945 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 31ee1cbc4ddb4e438f60e0f0462eacbd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732101586424 2024-11-20T11:19:48,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741978_1154 (size=4469) 2024-11-20T11:19:48,961 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dd1f041f5ff83d363163edeec4cd720#C#compaction#131 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:48,962 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/980e839be2bc4973a781b4d56d69dd7e is 50, key is test_row_0/C:col10/1732101586430/Put/seqid=0 2024-11-20T11:19:48,963 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dd1f041f5ff83d363163edeec4cd720#A#compaction#130 average throughput is 0.39 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:48,965 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/33da7bdcac604f1f9b6cd8e7413a13d7 is 175, key is test_row_0/A:col10/1732101586430/Put/seqid=0 2024-11-20T11:19:48,975 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:48,976 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T11:19:48,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:48,976 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing 8dd1f041f5ff83d363163edeec4cd720 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-20T11:19:48,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=A 2024-11-20T11:19:48,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:48,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=B 2024-11-20T11:19:48,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:48,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=C 2024-11-20T11:19:48,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:48,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741979_1155 (size=12207) 2024-11-20T11:19:48,998 DEBUG 
[RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/980e839be2bc4973a781b4d56d69dd7e as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/980e839be2bc4973a781b4d56d69dd7e 2024-11-20T11:19:49,004 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 8dd1f041f5ff83d363163edeec4cd720/C of 8dd1f041f5ff83d363163edeec4cd720 into 980e839be2bc4973a781b4d56d69dd7e(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:19:49,005 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:49,005 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720., storeName=8dd1f041f5ff83d363163edeec4cd720/C, priority=10, startTime=1732101588861; duration=0sec 2024-11-20T11:19:49,005 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:49,005 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dd1f041f5ff83d363163edeec4cd720:C 2024-11-20T11:19:49,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207f783f9b1b224f0d851b702d5d467066_8dd1f041f5ff83d363163edeec4cd720 is 50, key is test_row_0/A:col10/1732101587580/Put/seqid=0 2024-11-20T11:19:49,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741980_1156 (size=31161) 2024-11-20T11:19:49,020 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/33da7bdcac604f1f9b6cd8e7413a13d7 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/33da7bdcac604f1f9b6cd8e7413a13d7 2024-11-20T11:19:49,026 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 8dd1f041f5ff83d363163edeec4cd720/A of 8dd1f041f5ff83d363163edeec4cd720 into 33da7bdcac604f1f9b6cd8e7413a13d7(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:19:49,026 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:49,026 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720., storeName=8dd1f041f5ff83d363163edeec4cd720/A, priority=10, startTime=1732101588861; duration=0sec 2024-11-20T11:19:49,027 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:49,027 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dd1f041f5ff83d363163edeec4cd720:A 2024-11-20T11:19:49,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741981_1157 (size=12154) 2024-11-20T11:19:49,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,039 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207f783f9b1b224f0d851b702d5d467066_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207f783f9b1b224f0d851b702d5d467066_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:49,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/b1aacd4bc83f49af8962f060882379cb, store: [table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:49,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/b1aacd4bc83f49af8962f060882379cb is 175, key is test_row_0/A:col10/1732101587580/Put/seqid=0 2024-11-20T11:19:49,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741982_1158 (size=30955) 2024-11-20T11:19:49,053 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=127, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/b1aacd4bc83f49af8962f060882379cb 2024-11-20T11:19:49,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 
{event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/76c0a93e640f40f5b64c1dafbc066a05 is 50, key is test_row_0/B:col10/1732101587580/Put/seqid=0 2024-11-20T11:19:49,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741983_1159 (size=12001) 2024-11-20T11:19:49,078 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/76c0a93e640f40f5b64c1dafbc066a05 2024-11-20T11:19:49,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/8102f2fbe71343d2a883f1df402668f8 is 50, key is test_row_0/C:col10/1732101587580/Put/seqid=0 2024-11-20T11:19:49,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741984_1160 (size=12001) 2024-11-20T11:19:49,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T11:19:49,498 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/8102f2fbe71343d2a883f1df402668f8 2024-11-20T11:19:49,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/b1aacd4bc83f49af8962f060882379cb as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/b1aacd4bc83f49af8962f060882379cb 2024-11-20T11:19:49,511 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/b1aacd4bc83f49af8962f060882379cb, entries=150, sequenceid=127, filesize=30.2 K 2024-11-20T11:19:49,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/76c0a93e640f40f5b64c1dafbc066a05 as 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/76c0a93e640f40f5b64c1dafbc066a05 2024-11-20T11:19:49,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,517 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/76c0a93e640f40f5b64c1dafbc066a05, entries=150, sequenceid=127, filesize=11.7 K 2024-11-20T11:19:49,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/8102f2fbe71343d2a883f1df402668f8 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/8102f2fbe71343d2a883f1df402668f8 2024-11-20T11:19:49,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,524 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/8102f2fbe71343d2a883f1df402668f8, entries=150, sequenceid=127, filesize=11.7 K 2024-11-20T11:19:49,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,525 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=0 B/0 for 8dd1f041f5ff83d363163edeec4cd720 in 549ms, sequenceid=127, compaction requested=false 2024-11-20T11:19:49,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:49,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
2024-11-20T11:19:49,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58
2024-11-20T11:19:49,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T11:19:49,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=58
2024-11-20T11:19:49,529 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57
2024-11-20T11:19:49,529 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3170 sec
2024-11-20T11:19:49,531 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 1.3220 sec
2024-11-20T11:19:49,622 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,626 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,629 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,634 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,638 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,642 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,647 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,651 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,654 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,658 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,663 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,666 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,670 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,674 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
... [same DEBUG entry — storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker — repeated continuously by RpcServer.default.FPBQ.Fifo handlers 0, 1, and 2 on port 35185 from 2024-11-20T11:19:49,674 through 2024-11-20T11:19:49,750; duplicate entries omitted] ...
2024-11-20T11:19:49,750 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,753 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,757 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,760 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,763 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,767 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,770 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,773 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,777 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:49,782 DEBUG 
2024-11-20T11:19:49,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 8dd1f041f5ff83d363163edeec4cd720
2024-11-20T11:19:49,785 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8dd1f041f5ff83d363163edeec4cd720 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-11-20T11:19:49,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=A
2024-11-20T11:19:49,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T11:19:49,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=B
2024-11-20T11:19:49,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T11:19:49,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=C
2024-11-20T11:19:49,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T11:19:49,795 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120abc157ea001c4e329c5eae06ba92ba23_8dd1f041f5ff83d363163edeec4cd720 is 50, key is test_row_0/A:col10/1732101589782/Put/seqid=0
2024-11-20T11:19:49,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741985_1161 (size=24758)
2024-11-20T11:19:49,807 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T11:19:49,814 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120abc157ea001c4e329c5eae06ba92ba23_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120abc157ea001c4e329c5eae06ba92ba23_8dd1f041f5ff83d363163edeec4cd720
2024-11-20T11:19:49,815 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/57da6258d45c46a99dbdbd6280b64cc1, store: [table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720]
2024-11-20T11:19:49,816 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/57da6258d45c46a99dbdbd6280b64cc1 is 175, key is test_row_0/A:col10/1732101589782/Put/seqid=0
2024-11-20T11:19:49,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741986_1162 (size=74390)
2024-11-20T11:19:49,822 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=140, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/57da6258d45c46a99dbdbd6280b64cc1
2024-11-20T11:19:49,831 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/bb7e944d50014f92bbf0a5e7fef53f30 is 50, key is test_row_0/B:col10/1732101589782/Put/seqid=0
2024-11-20T11:19:49,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741987_1163 (size=24101)
2024-11-20T11:19:49,862 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:49,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101649860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:49,863 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:49,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101649860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:49,863 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:49,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101649860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:49,863 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:49,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101649861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:49,866 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:49,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101649862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:49,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:49,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101649963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:49,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:49,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101649964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:49,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:49,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101649964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:49,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:49,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101649964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:49,969 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:49,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101649967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:50,167 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:50,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101650167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:50,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:50,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101650168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:50,172 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:50,172 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:50,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101650172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:50,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101650172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:50,173 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:50,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101650172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:50,247 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=140 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/bb7e944d50014f92bbf0a5e7fef53f30 2024-11-20T11:19:50,257 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/0e17194387c34942a802fce4fd04858d is 50, key is test_row_0/C:col10/1732101589782/Put/seqid=0 2024-11-20T11:19:50,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741988_1164 (size=9757) 2024-11-20T11:19:50,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T11:19:50,314 INFO [Thread-645 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-11-20T11:19:50,317 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:19:50,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees 2024-11-20T11:19:50,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 
2024-11-20T11:19:50,319 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:19:50,320 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:19:50,320 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:19:50,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T11:19:50,469 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:50,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101650469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:50,472 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:50,472 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-20T11:19:50,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
2024-11-20T11:19:50,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. as already flushing 2024-11-20T11:19:50,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:50,473 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:50,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:50,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:50,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101650471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:50,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:50,476 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:50,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101650474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:50,476 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:50,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101650474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:50,476 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:50,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101650475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:50,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T11:19:50,626 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:50,626 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-20T11:19:50,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:50,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. as already flushing 2024-11-20T11:19:50,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:50,627 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:19:50,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:50,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:50,667 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=140 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/0e17194387c34942a802fce4fd04858d 2024-11-20T11:19:50,674 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/57da6258d45c46a99dbdbd6280b64cc1 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/57da6258d45c46a99dbdbd6280b64cc1 2024-11-20T11:19:50,678 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/57da6258d45c46a99dbdbd6280b64cc1, entries=400, sequenceid=140, filesize=72.6 K 2024-11-20T11:19:50,679 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/bb7e944d50014f92bbf0a5e7fef53f30 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/bb7e944d50014f92bbf0a5e7fef53f30 2024-11-20T11:19:50,692 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/bb7e944d50014f92bbf0a5e7fef53f30, entries=400, sequenceid=140, filesize=23.5 K 2024-11-20T11:19:50,694 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/0e17194387c34942a802fce4fd04858d as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/0e17194387c34942a802fce4fd04858d 2024-11-20T11:19:50,699 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/0e17194387c34942a802fce4fd04858d, entries=100, sequenceid=140, filesize=9.5 K 2024-11-20T11:19:50,700 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 8dd1f041f5ff83d363163edeec4cd720 in 915ms, sequenceid=140, compaction requested=true 2024-11-20T11:19:50,700 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:50,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
8dd1f041f5ff83d363163edeec4cd720:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:19:50,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:50,701 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:19:50,701 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:19:50,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dd1f041f5ff83d363163edeec4cd720:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:19:50,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:50,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dd1f041f5ff83d363163edeec4cd720:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:19:50,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:19:50,702 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 136506 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:50,702 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 8dd1f041f5ff83d363163edeec4cd720/A is initiating minor compaction (all files) 2024-11-20T11:19:50,702 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 48309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:50,702 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dd1f041f5ff83d363163edeec4cd720/A in TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:50,702 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 8dd1f041f5ff83d363163edeec4cd720/B is initiating minor compaction (all files) 2024-11-20T11:19:50,702 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dd1f041f5ff83d363163edeec4cd720/B in TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
2024-11-20T11:19:50,702 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/33da7bdcac604f1f9b6cd8e7413a13d7, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/b1aacd4bc83f49af8962f060882379cb, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/57da6258d45c46a99dbdbd6280b64cc1] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp, totalSize=133.3 K 2024-11-20T11:19:50,702 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:50,702 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/e454e56b2aeb439d903173a3d52c685e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/76c0a93e640f40f5b64c1dafbc066a05, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/bb7e944d50014f92bbf0a5e7fef53f30] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp, totalSize=47.2 K 2024-11-20T11:19:50,702 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
files: [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/33da7bdcac604f1f9b6cd8e7413a13d7, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/b1aacd4bc83f49af8962f060882379cb, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/57da6258d45c46a99dbdbd6280b64cc1] 2024-11-20T11:19:50,703 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting e454e56b2aeb439d903173a3d52c685e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732101586424 2024-11-20T11:19:50,703 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 33da7bdcac604f1f9b6cd8e7413a13d7, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732101586424 2024-11-20T11:19:50,703 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 76c0a93e640f40f5b64c1dafbc066a05, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732101587580 2024-11-20T11:19:50,703 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting b1aacd4bc83f49af8962f060882379cb, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732101587580 2024-11-20T11:19:50,704 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting bb7e944d50014f92bbf0a5e7fef53f30, keycount=400, bloomtype=ROW, size=23.5 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732101589738 2024-11-20T11:19:50,704 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 57da6258d45c46a99dbdbd6280b64cc1, keycount=400, bloomtype=ROW, size=72.6 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732101589738 2024-11-20T11:19:50,718 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:50,719 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dd1f041f5ff83d363163edeec4cd720#B#compaction#138 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:50,720 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/303329d728dd4bd5a52fbf826bb5e939 is 50, key is test_row_0/B:col10/1732101589782/Put/seqid=0 2024-11-20T11:19:50,720 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120269bc21a56fb47e591f0ab5156e12b66_8dd1f041f5ff83d363163edeec4cd720 store=[table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:50,724 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120269bc21a56fb47e591f0ab5156e12b66_8dd1f041f5ff83d363163edeec4cd720, store=[table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:50,724 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120269bc21a56fb47e591f0ab5156e12b66_8dd1f041f5ff83d363163edeec4cd720 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:50,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741989_1165 (size=12409) 2024-11-20T11:19:50,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741990_1166 (size=4469) 2024-11-20T11:19:50,779 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:50,780 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-20T11:19:50,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
2024-11-20T11:19:50,780 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing 8dd1f041f5ff83d363163edeec4cd720 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T11:19:50,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=A 2024-11-20T11:19:50,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:50,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=B 2024-11-20T11:19:50,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:50,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=C 2024-11-20T11:19:50,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:50,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120446f304b451c4959a17531b923b37eff_8dd1f041f5ff83d363163edeec4cd720 is 50, key is test_row_0/A:col10/1732101589861/Put/seqid=0 2024-11-20T11:19:50,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741991_1167 (size=12304) 2024-11-20T11:19:50,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T11:19:50,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:50,973 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. as already flushing 2024-11-20T11:19:50,987 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:50,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101650984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:50,987 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:50,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101650985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:50,988 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:50,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101650985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:50,988 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:50,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101650985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:50,988 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:50,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101650985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:51,091 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:51,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101651088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:51,091 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:51,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101651089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:51,092 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:51,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101651089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:51,092 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:51,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101651089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:51,149 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/303329d728dd4bd5a52fbf826bb5e939 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/303329d728dd4bd5a52fbf826bb5e939 2024-11-20T11:19:51,151 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dd1f041f5ff83d363163edeec4cd720#A#compaction#139 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:51,152 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/af60f3f941e04d1cb207e470d04c76e6 is 175, key is test_row_0/A:col10/1732101589782/Put/seqid=0 2024-11-20T11:19:51,157 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dd1f041f5ff83d363163edeec4cd720/B of 8dd1f041f5ff83d363163edeec4cd720 into 303329d728dd4bd5a52fbf826bb5e939(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:19:51,157 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:51,157 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720., storeName=8dd1f041f5ff83d363163edeec4cd720/B, priority=13, startTime=1732101590701; duration=0sec 2024-11-20T11:19:51,157 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:19:51,157 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dd1f041f5ff83d363163edeec4cd720:B 2024-11-20T11:19:51,157 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:19:51,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741992_1168 (size=31470) 2024-11-20T11:19:51,159 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:51,159 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 8dd1f041f5ff83d363163edeec4cd720/C is initiating minor compaction (all files) 2024-11-20T11:19:51,159 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dd1f041f5ff83d363163edeec4cd720/C in TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
2024-11-20T11:19:51,159 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/980e839be2bc4973a781b4d56d69dd7e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/8102f2fbe71343d2a883f1df402668f8, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/0e17194387c34942a802fce4fd04858d] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp, totalSize=33.2 K 2024-11-20T11:19:51,160 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 980e839be2bc4973a781b4d56d69dd7e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732101586424 2024-11-20T11:19:51,160 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 8102f2fbe71343d2a883f1df402668f8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732101587580 2024-11-20T11:19:51,161 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e17194387c34942a802fce4fd04858d, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732101589782 2024-11-20T11:19:51,164 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/af60f3f941e04d1cb207e470d04c76e6 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/af60f3f941e04d1cb207e470d04c76e6 2024-11-20T11:19:51,171 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dd1f041f5ff83d363163edeec4cd720/A of 8dd1f041f5ff83d363163edeec4cd720 into af60f3f941e04d1cb207e470d04c76e6(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:19:51,171 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:51,171 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720., storeName=8dd1f041f5ff83d363163edeec4cd720/A, priority=13, startTime=1732101590700; duration=0sec 2024-11-20T11:19:51,171 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:51,171 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dd1f041f5ff83d363163edeec4cd720:A 2024-11-20T11:19:51,172 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dd1f041f5ff83d363163edeec4cd720#C#compaction#141 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:51,172 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/d79e2a86fdff45758814dcdda66a7a01 is 50, key is test_row_0/C:col10/1732101589782/Put/seqid=0 2024-11-20T11:19:51,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741993_1169 (size=12409) 2024-11-20T11:19:51,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:51,210 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120446f304b451c4959a17531b923b37eff_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120446f304b451c4959a17531b923b37eff_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:51,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/3772ab466268497f88f38aef4d8ac682, store: [table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:51,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/3772ab466268497f88f38aef4d8ac682 is 175, key is 
test_row_0/A:col10/1732101589861/Put/seqid=0 2024-11-20T11:19:51,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741994_1170 (size=31105) 2024-11-20T11:19:51,294 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:51,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101651292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:51,294 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:51,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101651293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:51,294 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:51,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101651294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:51,296 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:51,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101651294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:51,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T11:19:51,592 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/d79e2a86fdff45758814dcdda66a7a01 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/d79e2a86fdff45758814dcdda66a7a01 2024-11-20T11:19:51,596 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:51,596 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:51,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101651595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:51,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101651595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:51,596 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:51,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101651595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:51,598 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dd1f041f5ff83d363163edeec4cd720/C of 8dd1f041f5ff83d363163edeec4cd720 into d79e2a86fdff45758814dcdda66a7a01(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:19:51,598 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:51,598 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720., storeName=8dd1f041f5ff83d363163edeec4cd720/C, priority=13, startTime=1732101590701; duration=0sec 2024-11-20T11:19:51,598 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:51,598 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dd1f041f5ff83d363163edeec4cd720:C 2024-11-20T11:19:51,600 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:51,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101651599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:51,623 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=165, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/3772ab466268497f88f38aef4d8ac682 2024-11-20T11:19:51,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/1c3bd6dafa674bdcab38ecb9982703ed is 50, key is test_row_0/B:col10/1732101589861/Put/seqid=0 2024-11-20T11:19:51,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741995_1171 (size=12151) 2024-11-20T11:19:51,639 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/1c3bd6dafa674bdcab38ecb9982703ed 2024-11-20T11:19:51,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/fd9d949dfb2741e1b910a3cf8b249a33 is 50, key is test_row_0/C:col10/1732101589861/Put/seqid=0 2024-11-20T11:19:51,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741996_1172 (size=12151) 2024-11-20T11:19:51,710 INFO [master/ee8338ed7cc0:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-20T11:19:51,710 INFO [master/ee8338ed7cc0:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-20T11:19:51,991 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:51,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101651989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:52,051 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/fd9d949dfb2741e1b910a3cf8b249a33 2024-11-20T11:19:52,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/3772ab466268497f88f38aef4d8ac682 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/3772ab466268497f88f38aef4d8ac682 2024-11-20T11:19:52,062 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/3772ab466268497f88f38aef4d8ac682, entries=150, sequenceid=165, filesize=30.4 K 2024-11-20T11:19:52,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/1c3bd6dafa674bdcab38ecb9982703ed as 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/1c3bd6dafa674bdcab38ecb9982703ed 2024-11-20T11:19:52,067 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/1c3bd6dafa674bdcab38ecb9982703ed, entries=150, sequenceid=165, filesize=11.9 K 2024-11-20T11:19:52,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/fd9d949dfb2741e1b910a3cf8b249a33 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/fd9d949dfb2741e1b910a3cf8b249a33 2024-11-20T11:19:52,072 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/fd9d949dfb2741e1b910a3cf8b249a33, entries=150, sequenceid=165, filesize=11.9 K 2024-11-20T11:19:52,074 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=67.09 KB/68700 for 8dd1f041f5ff83d363163edeec4cd720 in 1293ms, sequenceid=165, compaction requested=false 2024-11-20T11:19:52,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:52,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
2024-11-20T11:19:52,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60 2024-11-20T11:19:52,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=60 2024-11-20T11:19:52,078 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-11-20T11:19:52,078 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7550 sec 2024-11-20T11:19:52,080 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 1.7620 sec 2024-11-20T11:19:52,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:52,100 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8dd1f041f5ff83d363163edeec4cd720 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T11:19:52,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=A 2024-11-20T11:19:52,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:52,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=B 2024-11-20T11:19:52,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:52,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=C 2024-11-20T11:19:52,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:52,108 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d0121a1077af43e494bc7d9c09745317_8dd1f041f5ff83d363163edeec4cd720 is 50, key is test_row_0/A:col10/1732101592098/Put/seqid=0 2024-11-20T11:19:52,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741997_1173 (size=14794) 2024-11-20T11:19:52,119 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:52,124 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d0121a1077af43e494bc7d9c09745317_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d0121a1077af43e494bc7d9c09745317_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:52,126 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/3aeb0c3e2bc2400691be53d239f41338, store: [table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:52,127 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/3aeb0c3e2bc2400691be53d239f41338 is 175, key is test_row_0/A:col10/1732101592098/Put/seqid=0 2024-11-20T11:19:52,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:52,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101652124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:52,128 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:52,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101652125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:52,128 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:52,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101652126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:52,129 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:52,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101652127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:52,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741998_1174 (size=39749) 2024-11-20T11:19:52,231 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:52,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101652229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:52,231 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:52,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101652229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:52,231 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:52,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101652229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:52,232 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:52,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101652231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:52,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T11:19:52,423 INFO [Thread-645 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-11-20T11:19:52,424 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:19:52,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees 2024-11-20T11:19:52,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T11:19:52,426 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:19:52,426 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:19:52,427 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:19:52,434 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:52,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101652433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:52,435 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:52,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101652434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:52,435 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:52,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101652434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:52,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:52,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101652434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:52,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T11:19:52,550 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=182, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/3aeb0c3e2bc2400691be53d239f41338 2024-11-20T11:19:52,559 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/524d8cb3f93e4506a12a838e471acea5 is 50, key is test_row_0/B:col10/1732101592098/Put/seqid=0 2024-11-20T11:19:52,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741999_1175 (size=12151) 2024-11-20T11:19:52,579 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:52,579 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T11:19:52,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:52,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. as already flushing 2024-11-20T11:19:52,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
2024-11-20T11:19:52,579 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:52,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:52,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:52,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T11:19:52,732 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:52,732 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T11:19:52,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:52,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. as already flushing 2024-11-20T11:19:52,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:52,732 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:52,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:52,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:52,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:52,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101652737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:52,739 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:52,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101652737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:52,739 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:52,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101652737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:52,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:52,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101652739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:52,885 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:52,885 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T11:19:52,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:52,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. as already flushing 2024-11-20T11:19:52,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:52,886 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:19:52,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:52,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:52,976 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=182 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/524d8cb3f93e4506a12a838e471acea5 2024-11-20T11:19:52,984 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/54b6f1aba74a4454a56f75cd3214f3b8 is 50, key is test_row_0/C:col10/1732101592098/Put/seqid=0 2024-11-20T11:19:52,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742000_1176 (size=12151) 2024-11-20T11:19:52,991 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=182 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/54b6f1aba74a4454a56f75cd3214f3b8 2024-11-20T11:19:52,996 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/3aeb0c3e2bc2400691be53d239f41338 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/3aeb0c3e2bc2400691be53d239f41338 2024-11-20T11:19:53,000 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/3aeb0c3e2bc2400691be53d239f41338, entries=200, sequenceid=182, filesize=38.8 K 2024-11-20T11:19:53,001 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/524d8cb3f93e4506a12a838e471acea5 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/524d8cb3f93e4506a12a838e471acea5 2024-11-20T11:19:53,005 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/524d8cb3f93e4506a12a838e471acea5, entries=150, sequenceid=182, filesize=11.9 K 2024-11-20T11:19:53,006 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/54b6f1aba74a4454a56f75cd3214f3b8 as 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/54b6f1aba74a4454a56f75cd3214f3b8 2024-11-20T11:19:53,011 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/54b6f1aba74a4454a56f75cd3214f3b8, entries=150, sequenceid=182, filesize=11.9 K 2024-11-20T11:19:53,015 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 8dd1f041f5ff83d363163edeec4cd720 in 916ms, sequenceid=182, compaction requested=true 2024-11-20T11:19:53,015 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:53,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dd1f041f5ff83d363163edeec4cd720:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:19:53,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:53,016 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:19:53,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dd1f041f5ff83d363163edeec4cd720:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:19:53,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:19:53,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dd1f041f5ff83d363163edeec4cd720:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:19:53,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T11:19:53,016 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:19:53,018 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102324 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:53,018 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 8dd1f041f5ff83d363163edeec4cd720/A is initiating minor compaction (all files) 2024-11-20T11:19:53,018 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dd1f041f5ff83d363163edeec4cd720/A in TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
2024-11-20T11:19:53,018 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/af60f3f941e04d1cb207e470d04c76e6, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/3772ab466268497f88f38aef4d8ac682, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/3aeb0c3e2bc2400691be53d239f41338] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp, totalSize=99.9 K 2024-11-20T11:19:53,018 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:53,018 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. files: [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/af60f3f941e04d1cb207e470d04c76e6, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/3772ab466268497f88f38aef4d8ac682, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/3aeb0c3e2bc2400691be53d239f41338] 2024-11-20T11:19:53,019 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:53,019 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 8dd1f041f5ff83d363163edeec4cd720/B is initiating minor compaction (all files) 2024-11-20T11:19:53,019 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dd1f041f5ff83d363163edeec4cd720/B in TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
2024-11-20T11:19:53,019 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/303329d728dd4bd5a52fbf826bb5e939, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/1c3bd6dafa674bdcab38ecb9982703ed, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/524d8cb3f93e4506a12a838e471acea5] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp, totalSize=35.9 K 2024-11-20T11:19:53,019 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting af60f3f941e04d1cb207e470d04c76e6, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732101587581 2024-11-20T11:19:53,019 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 303329d728dd4bd5a52fbf826bb5e939, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732101587581 2024-11-20T11:19:53,020 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3772ab466268497f88f38aef4d8ac682, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1732101589858 2024-11-20T11:19:53,020 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c3bd6dafa674bdcab38ecb9982703ed, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1732101589858 2024-11-20T11:19:53,020 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3aeb0c3e2bc2400691be53d239f41338, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1732101590980 2024-11-20T11:19:53,021 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 524d8cb3f93e4506a12a838e471acea5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1732101590980 2024-11-20T11:19:53,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T11:19:53,030 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:53,031 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dd1f041f5ff83d363163edeec4cd720#B#compaction#147 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:53,032 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/7d3f6add7ca74127b52f800ceb51b67c is 50, key is test_row_0/B:col10/1732101592098/Put/seqid=0 2024-11-20T11:19:53,032 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120d408f36146784990bf46346a9b97c84c_8dd1f041f5ff83d363163edeec4cd720 store=[table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:53,034 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120d408f36146784990bf46346a9b97c84c_8dd1f041f5ff83d363163edeec4cd720, store=[table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:53,035 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d408f36146784990bf46346a9b97c84c_8dd1f041f5ff83d363163edeec4cd720 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:53,038 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:53,038 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T11:19:53,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
2024-11-20T11:19:53,039 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2837): Flushing 8dd1f041f5ff83d363163edeec4cd720 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T11:19:53,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=A 2024-11-20T11:19:53,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:53,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=B 2024-11-20T11:19:53,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:53,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=C 2024-11-20T11:19:53,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:53,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742001_1177 (size=12561) 2024-11-20T11:19:53,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742002_1178 (size=4469) 2024-11-20T11:19:53,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201ce53d5e494343049cf052b5d6908518_8dd1f041f5ff83d363163edeec4cd720 is 50, key is test_row_0/A:col10/1732101592122/Put/seqid=0 2024-11-20T11:19:53,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742003_1179 (size=12304) 2024-11-20T11:19:53,243 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. as already flushing 2024-11-20T11:19:53,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:53,252 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:53,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101653250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:53,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:53,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101653252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:53,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:53,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101653252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:53,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:53,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101653253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:53,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:53,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101653354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:53,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:53,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101653358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:53,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:53,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101653358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:53,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:53,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101653358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:53,450 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dd1f041f5ff83d363163edeec4cd720#A#compaction#148 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:53,451 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/7d3f6add7ca74127b52f800ceb51b67c as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/7d3f6add7ca74127b52f800ceb51b67c 2024-11-20T11:19:53,451 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/effd967352dd477b8e55715c0ceb6c59 is 175, key is test_row_0/A:col10/1732101592098/Put/seqid=0 2024-11-20T11:19:53,458 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dd1f041f5ff83d363163edeec4cd720/B of 8dd1f041f5ff83d363163edeec4cd720 into 7d3f6add7ca74127b52f800ceb51b67c(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:19:53,458 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:53,458 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720., storeName=8dd1f041f5ff83d363163edeec4cd720/B, priority=13, startTime=1732101593016; duration=0sec 2024-11-20T11:19:53,458 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:19:53,458 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dd1f041f5ff83d363163edeec4cd720:B 2024-11-20T11:19:53,458 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:19:53,459 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:53,459 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 8dd1f041f5ff83d363163edeec4cd720/C is initiating minor compaction (all files) 2024-11-20T11:19:53,459 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dd1f041f5ff83d363163edeec4cd720/C in TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:53,459 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/d79e2a86fdff45758814dcdda66a7a01, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/fd9d949dfb2741e1b910a3cf8b249a33, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/54b6f1aba74a4454a56f75cd3214f3b8] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp, totalSize=35.9 K 2024-11-20T11:19:53,460 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting d79e2a86fdff45758814dcdda66a7a01, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732101587581 2024-11-20T11:19:53,460 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting fd9d949dfb2741e1b910a3cf8b249a33, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1732101589858 2024-11-20T11:19:53,461 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 54b6f1aba74a4454a56f75cd3214f3b8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1732101590980 2024-11-20T11:19:53,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 
is added to blk_1073742004_1180 (size=31515) 2024-11-20T11:19:53,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:53,468 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/effd967352dd477b8e55715c0ceb6c59 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/effd967352dd477b8e55715c0ceb6c59 2024-11-20T11:19:53,470 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201ce53d5e494343049cf052b5d6908518_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201ce53d5e494343049cf052b5d6908518_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:53,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/0a9b66f7af844e62b19c13b88261c6c9, store: [table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:53,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/0a9b66f7af844e62b19c13b88261c6c9 is 175, key is test_row_0/A:col10/1732101592122/Put/seqid=0 2024-11-20T11:19:53,474 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dd1f041f5ff83d363163edeec4cd720#C#compaction#150 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:53,474 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/1f28ba679858450293f59bfaf3f00a9d is 50, key is test_row_0/C:col10/1732101592098/Put/seqid=0 2024-11-20T11:19:53,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742005_1181 (size=31105) 2024-11-20T11:19:53,478 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=204, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/0a9b66f7af844e62b19c13b88261c6c9 2024-11-20T11:19:53,479 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dd1f041f5ff83d363163edeec4cd720/A of 8dd1f041f5ff83d363163edeec4cd720 into effd967352dd477b8e55715c0ceb6c59(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:19:53,479 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:53,480 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720., storeName=8dd1f041f5ff83d363163edeec4cd720/A, priority=13, startTime=1732101593016; duration=0sec 2024-11-20T11:19:53,481 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:53,481 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dd1f041f5ff83d363163edeec4cd720:A 2024-11-20T11:19:53,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/0a6aeb737f3a4f2e9d04c37c9e4a893c is 50, key is test_row_0/B:col10/1732101592122/Put/seqid=0 2024-11-20T11:19:53,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742006_1182 (size=12561) 2024-11-20T11:19:53,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742007_1183 (size=12151) 2024-11-20T11:19:53,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T11:19:53,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:53,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101653556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:53,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:53,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101653560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:53,562 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:53,563 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:53,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101653561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:53,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101653562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:53,859 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:53,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101653858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:53,863 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:53,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101653862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:53,865 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:53,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101653865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:53,866 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:53,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101653865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:53,904 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/0a6aeb737f3a4f2e9d04c37c9e4a893c 2024-11-20T11:19:53,910 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/1f28ba679858450293f59bfaf3f00a9d as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/1f28ba679858450293f59bfaf3f00a9d 2024-11-20T11:19:53,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/0b2434c027d049d08a22f2944f857ada is 50, key is test_row_0/C:col10/1732101592122/Put/seqid=0 2024-11-20T11:19:53,915 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dd1f041f5ff83d363163edeec4cd720/C of 8dd1f041f5ff83d363163edeec4cd720 into 1f28ba679858450293f59bfaf3f00a9d(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:19:53,915 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:53,915 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720., storeName=8dd1f041f5ff83d363163edeec4cd720/C, priority=13, startTime=1732101593016; duration=0sec 2024-11-20T11:19:53,915 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:53,915 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dd1f041f5ff83d363163edeec4cd720:C 2024-11-20T11:19:53,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742008_1184 (size=12151) 2024-11-20T11:19:53,925 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/0b2434c027d049d08a22f2944f857ada 2024-11-20T11:19:53,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/0a9b66f7af844e62b19c13b88261c6c9 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/0a9b66f7af844e62b19c13b88261c6c9 2024-11-20T11:19:53,935 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/0a9b66f7af844e62b19c13b88261c6c9, entries=150, sequenceid=204, filesize=30.4 K 2024-11-20T11:19:53,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/0a6aeb737f3a4f2e9d04c37c9e4a893c as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/0a6aeb737f3a4f2e9d04c37c9e4a893c 2024-11-20T11:19:53,941 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/0a6aeb737f3a4f2e9d04c37c9e4a893c, entries=150, sequenceid=204, filesize=11.9 K 2024-11-20T11:19:53,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/0b2434c027d049d08a22f2944f857ada as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/0b2434c027d049d08a22f2944f857ada 2024-11-20T11:19:53,947 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/0b2434c027d049d08a22f2944f857ada, entries=150, sequenceid=204, filesize=11.9 K 2024-11-20T11:19:53,951 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 8dd1f041f5ff83d363163edeec4cd720 in 913ms, sequenceid=204, compaction requested=false 2024-11-20T11:19:53,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2538): Flush status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:53,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:53,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=62 2024-11-20T11:19:53,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=62 2024-11-20T11:19:53,954 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-11-20T11:19:53,954 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5260 sec 2024-11-20T11:19:53,957 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees in 1.5310 sec 2024-11-20T11:19:53,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:53,998 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8dd1f041f5ff83d363163edeec4cd720 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T11:19:53,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=A 2024-11-20T11:19:53,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:53,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=B 2024-11-20T11:19:53,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:53,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
8dd1f041f5ff83d363163edeec4cd720, store=C 2024-11-20T11:19:53,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:54,007 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ec149889ad824605bdd894b410191984_8dd1f041f5ff83d363163edeec4cd720 is 50, key is test_row_0/A:col10/1732101593252/Put/seqid=0 2024-11-20T11:19:54,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742009_1185 (size=14794) 2024-11-20T11:19:54,013 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:54,017 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ec149889ad824605bdd894b410191984_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ec149889ad824605bdd894b410191984_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:54,019 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/f37359dcbc9f445393537601f8faebce, store: [table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:54,019 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/f37359dcbc9f445393537601f8faebce is 175, key is test_row_0/A:col10/1732101593252/Put/seqid=0 2024-11-20T11:19:54,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742010_1186 (size=39749) 2024-11-20T11:19:54,024 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=223, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/f37359dcbc9f445393537601f8faebce 2024-11-20T11:19:54,034 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/691875146ac9467b9b6beb032caf9f4c is 50, key is test_row_0/B:col10/1732101593252/Put/seqid=0 2024-11-20T11:19:54,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742011_1187 (size=12151) 2024-11-20T11:19:54,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:54,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101654052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:54,155 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:54,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101654155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:54,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:54,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101654358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:54,363 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:54,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101654361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:54,367 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:54,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101654366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:54,367 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:54,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101654367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:54,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:54,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101654368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:54,448 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=223 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/691875146ac9467b9b6beb032caf9f4c 2024-11-20T11:19:54,457 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/847a725825624625afc02b19450d7747 is 50, key is test_row_0/C:col10/1732101593252/Put/seqid=0 2024-11-20T11:19:54,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742012_1188 (size=12151) 2024-11-20T11:19:54,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T11:19:54,531 INFO [Thread-645 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-11-20T11:19:54,532 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:19:54,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees 2024-11-20T11:19:54,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T11:19:54,534 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:19:54,535 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:19:54,535 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-11-20T11:19:54,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T11:19:54,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:54,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101654661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:54,686 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:54,687 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-20T11:19:54,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:54,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. as already flushing 2024-11-20T11:19:54,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
2024-11-20T11:19:54,687 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:54,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:54,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:54,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T11:19:54,839 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:54,840 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-20T11:19:54,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:54,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. as already flushing 2024-11-20T11:19:54,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:54,840 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:54,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:54,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:54,873 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=223 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/847a725825624625afc02b19450d7747 2024-11-20T11:19:54,879 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/f37359dcbc9f445393537601f8faebce as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/f37359dcbc9f445393537601f8faebce 2024-11-20T11:19:54,885 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/f37359dcbc9f445393537601f8faebce, entries=200, sequenceid=223, filesize=38.8 K 2024-11-20T11:19:54,886 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/691875146ac9467b9b6beb032caf9f4c as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/691875146ac9467b9b6beb032caf9f4c 2024-11-20T11:19:54,891 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/691875146ac9467b9b6beb032caf9f4c, entries=150, 
sequenceid=223, filesize=11.9 K 2024-11-20T11:19:54,892 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/847a725825624625afc02b19450d7747 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/847a725825624625afc02b19450d7747 2024-11-20T11:19:54,897 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/847a725825624625afc02b19450d7747, entries=150, sequenceid=223, filesize=11.9 K 2024-11-20T11:19:54,898 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 8dd1f041f5ff83d363163edeec4cd720 in 900ms, sequenceid=223, compaction requested=true 2024-11-20T11:19:54,898 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:54,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dd1f041f5ff83d363163edeec4cd720:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:19:54,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:54,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dd1f041f5ff83d363163edeec4cd720:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:19:54,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:54,899 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:19:54,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dd1f041f5ff83d363163edeec4cd720:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:19:54,899 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:19:54,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:19:54,900 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102369 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:54,900 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:54,900 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] 
regionserver.HStore(1540): 8dd1f041f5ff83d363163edeec4cd720/A is initiating minor compaction (all files) 2024-11-20T11:19:54,900 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 8dd1f041f5ff83d363163edeec4cd720/B is initiating minor compaction (all files) 2024-11-20T11:19:54,900 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dd1f041f5ff83d363163edeec4cd720/A in TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:54,900 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dd1f041f5ff83d363163edeec4cd720/B in TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:54,900 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/effd967352dd477b8e55715c0ceb6c59, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/0a9b66f7af844e62b19c13b88261c6c9, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/f37359dcbc9f445393537601f8faebce] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp, totalSize=100.0 K 2024-11-20T11:19:54,900 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/7d3f6add7ca74127b52f800ceb51b67c, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/0a6aeb737f3a4f2e9d04c37c9e4a893c, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/691875146ac9467b9b6beb032caf9f4c] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp, totalSize=36.0 K 2024-11-20T11:19:54,900 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:54,900 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
files: [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/effd967352dd477b8e55715c0ceb6c59, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/0a9b66f7af844e62b19c13b88261c6c9, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/f37359dcbc9f445393537601f8faebce] 2024-11-20T11:19:54,900 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d3f6add7ca74127b52f800ceb51b67c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1732101590980 2024-11-20T11:19:54,901 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting effd967352dd477b8e55715c0ceb6c59, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1732101590980 2024-11-20T11:19:54,901 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a6aeb737f3a4f2e9d04c37c9e4a893c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1732101592122 2024-11-20T11:19:54,901 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a9b66f7af844e62b19c13b88261c6c9, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1732101592122 2024-11-20T11:19:54,901 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 691875146ac9467b9b6beb032caf9f4c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1732101593251 2024-11-20T11:19:54,902 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting f37359dcbc9f445393537601f8faebce, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1732101593251 2024-11-20T11:19:54,909 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:54,910 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dd1f041f5ff83d363163edeec4cd720#B#compaction#156 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:54,911 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/68f4b46e33774e1898219b6b75265c67 is 50, key is test_row_0/B:col10/1732101593252/Put/seqid=0 2024-11-20T11:19:54,911 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120fb0e44f9fd6b45c496d44f63be3d94a0_8dd1f041f5ff83d363163edeec4cd720 store=[table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:54,913 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120fb0e44f9fd6b45c496d44f63be3d94a0_8dd1f041f5ff83d363163edeec4cd720, store=[table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:54,914 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120fb0e44f9fd6b45c496d44f63be3d94a0_8dd1f041f5ff83d363163edeec4cd720 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:54,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742013_1189 (size=12663) 2024-11-20T11:19:54,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742014_1190 (size=4469) 2024-11-20T11:19:54,941 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dd1f041f5ff83d363163edeec4cd720#A#compaction#157 average throughput is 0.76 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:54,942 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/11baddf9d75c45938566508eb2bfbe01 is 175, key is test_row_0/A:col10/1732101593252/Put/seqid=0 2024-11-20T11:19:54,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742015_1191 (size=31617) 2024-11-20T11:19:54,992 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:54,993 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-20T11:19:54,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
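
The 50.00 MB/second figure that the PressureAwareThroughputController reports above appears to be the regionserver's configured compaction throughput ceiling. A minimal sketch of tuning those bounds and the controller implementation through the site Configuration follows; this is an illustrative sketch, not part of this test run, and the property names and values are assumptions recalled from HBase 2.x that should be checked against the 2.7.0-SNAPSHOT build in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputTuning {
  // Returns a Configuration with compaction throughput throttling tuned.
  public static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    // Upper and lower bounds, in bytes per second, between which the
    // pressure-aware controller scales the allowed compaction throughput.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 25L * 1024 * 1024);
    // Controller implementation class; swapping in a no-limit controller disables throttling.
    conf.set("hbase.regionserver.throughput.controller",
        "org.apache.hadoop.hbase.regionserver.throttle.PressureAwareCompactionThroughputController");
    return conf;
  }
}
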
2024-11-20T11:19:54,993 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2837): Flushing 8dd1f041f5ff83d363163edeec4cd720 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-20T11:19:54,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=A 2024-11-20T11:19:54,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:54,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=B 2024-11-20T11:19:54,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:54,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=C 2024-11-20T11:19:54,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:55,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ed89989888554dc994ac400a20f31a6f_8dd1f041f5ff83d363163edeec4cd720 is 50, key is test_row_0/A:col10/1732101594000/Put/seqid=0 2024-11-20T11:19:55,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742016_1192 (size=12304) 2024-11-20T11:19:55,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T11:19:55,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:55,166 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. as already flushing 2024-11-20T11:19:55,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:55,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101655204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:55,307 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:55,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101655307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:55,329 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/68f4b46e33774e1898219b6b75265c67 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/68f4b46e33774e1898219b6b75265c67 2024-11-20T11:19:55,335 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dd1f041f5ff83d363163edeec4cd720/B of 8dd1f041f5ff83d363163edeec4cd720 into 68f4b46e33774e1898219b6b75265c67(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
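
The repeated RegionTooBusyException entries above and below are the server rejecting writes while the region is over its memstore limit; the client side (RpcRetryingCallerImpl, visible later with tries=6, retries=16) keeps retrying until flushes catch up. Below is a minimal client-side sketch of issuing such a put with an explicit retry budget. It is an illustrative sketch, not the AcidGuaranteesTestTool writer itself; the retry and pause values are assumptions, while the table, row, and column names are taken from the log.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Retry budget and base pause used by RpcRetryingCallerImpl between attempts.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_1"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        table.put(put);
      } catch (IOException e) {
        // Thrown only once the retry budget is exhausted; a RegionTooBusyException
        // from the server may arrive wrapped, so inspect the cause chain before
        // deciding to back off and resubmit.
        System.err.println("Put failed, likely memstore pressure: " + e);
      }
    }
  }
}
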
2024-11-20T11:19:55,335 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:55,335 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720., storeName=8dd1f041f5ff83d363163edeec4cd720/B, priority=13, startTime=1732101594899; duration=0sec 2024-11-20T11:19:55,335 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:19:55,335 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dd1f041f5ff83d363163edeec4cd720:B 2024-11-20T11:19:55,335 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:19:55,336 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:55,336 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 8dd1f041f5ff83d363163edeec4cd720/C is initiating minor compaction (all files) 2024-11-20T11:19:55,337 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dd1f041f5ff83d363163edeec4cd720/C in TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:55,337 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/1f28ba679858450293f59bfaf3f00a9d, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/0b2434c027d049d08a22f2944f857ada, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/847a725825624625afc02b19450d7747] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp, totalSize=36.0 K 2024-11-20T11:19:55,337 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f28ba679858450293f59bfaf3f00a9d, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1732101590980 2024-11-20T11:19:55,337 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b2434c027d049d08a22f2944f857ada, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1732101592122 2024-11-20T11:19:55,338 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 847a725825624625afc02b19450d7747, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1732101593251 2024-11-20T11:19:55,347 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
8dd1f041f5ff83d363163edeec4cd720#C#compaction#159 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:55,347 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/b00995c4e64f45f9aee2395f46612513 is 50, key is test_row_0/C:col10/1732101593252/Put/seqid=0 2024-11-20T11:19:55,353 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/11baddf9d75c45938566508eb2bfbe01 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/11baddf9d75c45938566508eb2bfbe01 2024-11-20T11:19:55,359 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dd1f041f5ff83d363163edeec4cd720/A of 8dd1f041f5ff83d363163edeec4cd720 into 11baddf9d75c45938566508eb2bfbe01(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:19:55,359 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:55,359 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720., storeName=8dd1f041f5ff83d363163edeec4cd720/A, priority=13, startTime=1732101594898; duration=0sec 2024-11-20T11:19:55,359 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:55,359 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dd1f041f5ff83d363163edeec4cd720:A 2024-11-20T11:19:55,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742017_1193 (size=12663) 2024-11-20T11:19:55,370 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/b00995c4e64f45f9aee2395f46612513 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/b00995c4e64f45f9aee2395f46612513 2024-11-20T11:19:55,370 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:55,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101655369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:55,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:55,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101655371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:55,375 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:55,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101655374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:55,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:55,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101655374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:55,376 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dd1f041f5ff83d363163edeec4cd720/C of 8dd1f041f5ff83d363163edeec4cd720 into b00995c4e64f45f9aee2395f46612513(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:19:55,376 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:55,376 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720., storeName=8dd1f041f5ff83d363163edeec4cd720/C, priority=13, startTime=1732101594899; duration=0sec 2024-11-20T11:19:55,376 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:55,376 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dd1f041f5ff83d363163edeec4cd720:C 2024-11-20T11:19:55,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:55,417 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ed89989888554dc994ac400a20f31a6f_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ed89989888554dc994ac400a20f31a6f_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:55,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/5e4efb7b3baf4e6bbdec80b8bdbbf54a, store: [table=TestAcidGuarantees family=A 
region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:55,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/5e4efb7b3baf4e6bbdec80b8bdbbf54a is 175, key is test_row_0/A:col10/1732101594000/Put/seqid=0 2024-11-20T11:19:55,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742018_1194 (size=31105) 2024-11-20T11:19:55,512 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:55,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101655510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:55,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T11:19:55,814 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:55,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101655814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:55,828 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=243, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/5e4efb7b3baf4e6bbdec80b8bdbbf54a 2024-11-20T11:19:55,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/685c5a05cc3c420caaef6dfb86b3bf5c is 50, key is test_row_0/B:col10/1732101594000/Put/seqid=0 2024-11-20T11:19:55,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742019_1195 (size=12151) 2024-11-20T11:19:56,244 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/685c5a05cc3c420caaef6dfb86b3bf5c 2024-11-20T11:19:56,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/21c9b45be6c44403bef1a482bdd3049d is 50, key is test_row_0/C:col10/1732101594000/Put/seqid=0 2024-11-20T11:19:56,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742020_1196 (size=12151) 2024-11-20T11:19:56,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:56,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101656317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:56,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T11:19:56,658 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/21c9b45be6c44403bef1a482bdd3049d 2024-11-20T11:19:56,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/5e4efb7b3baf4e6bbdec80b8bdbbf54a as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/5e4efb7b3baf4e6bbdec80b8bdbbf54a 2024-11-20T11:19:56,669 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/5e4efb7b3baf4e6bbdec80b8bdbbf54a, entries=150, sequenceid=243, filesize=30.4 K 2024-11-20T11:19:56,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/685c5a05cc3c420caaef6dfb86b3bf5c as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/685c5a05cc3c420caaef6dfb86b3bf5c 2024-11-20T11:19:56,675 
INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/685c5a05cc3c420caaef6dfb86b3bf5c, entries=150, sequenceid=243, filesize=11.9 K 2024-11-20T11:19:56,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/21c9b45be6c44403bef1a482bdd3049d as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/21c9b45be6c44403bef1a482bdd3049d 2024-11-20T11:19:56,680 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/21c9b45be6c44403bef1a482bdd3049d, entries=150, sequenceid=243, filesize=11.9 K 2024-11-20T11:19:56,681 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 8dd1f041f5ff83d363163edeec4cd720 in 1688ms, sequenceid=243, compaction requested=false 2024-11-20T11:19:56,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2538): Flush status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:56,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
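
The flush that completes here (dataSize ~114.05 KB written across the A, B and C families at sequenceid=243) was driven by pid=64, the FlushRegionProcedure spawned by the FlushTableProcedure (pid=63) that finishes just below. A hedged sketch of how such a table flush is requested from a client follows, under the assumption that this 2.7.0-SNAPSHOT build routes Admin.flush through the procedure framework seen in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; each region's
      // memstores for all column families are written out as new store files.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
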
2024-11-20T11:19:56,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=64 2024-11-20T11:19:56,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=64 2024-11-20T11:19:56,684 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-11-20T11:19:56,684 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1480 sec 2024-11-20T11:19:56,686 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees in 2.1530 sec 2024-11-20T11:19:57,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:57,326 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8dd1f041f5ff83d363163edeec4cd720 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-20T11:19:57,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=A 2024-11-20T11:19:57,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:57,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=B 2024-11-20T11:19:57,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:57,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=C 2024-11-20T11:19:57,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:57,334 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d2347eeafab54f2f906fc4d72dafc55f_8dd1f041f5ff83d363163edeec4cd720 is 50, key is test_row_0/A:col10/1732101595192/Put/seqid=0 2024-11-20T11:19:57,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742021_1197 (size=12404) 2024-11-20T11:19:57,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:57,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101657372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:57,375 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:57,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54608 deadline: 1732101657374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:57,376 DEBUG [Thread-639 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4124 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720., hostname=ee8338ed7cc0,35185,1732101546666, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T11:19:57,380 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:57,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54604 deadline: 1732101657378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:57,380 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:57,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54578 deadline: 1732101657379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:57,381 DEBUG [Thread-643 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4128 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720., hostname=ee8338ed7cc0,35185,1732101546666, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T11:19:57,381 DEBUG [Thread-635 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4129 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720., hostname=ee8338ed7cc0,35185,1732101546666, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T11:19:57,393 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:57,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54582 deadline: 1732101657391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:57,394 DEBUG [Thread-641 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4143 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720., hostname=ee8338ed7cc0,35185,1732101546666, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T11:19:57,475 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:57,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101657475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:57,679 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:57,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101657677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:57,753 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:57,757 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d2347eeafab54f2f906fc4d72dafc55f_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d2347eeafab54f2f906fc4d72dafc55f_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:57,759 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/e83ec71bbda745e889c6e242423adb46, store: [table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:57,759 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/e83ec71bbda745e889c6e242423adb46 is 175, key is test_row_0/A:col10/1732101595192/Put/seqid=0 2024-11-20T11:19:57,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742022_1198 (size=31205) 2024-11-20T11:19:57,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:57,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101657981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:58,165 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=263, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/e83ec71bbda745e889c6e242423adb46 2024-11-20T11:19:58,181 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/b02d1068de7a49059997c5189fdf6cbb is 50, key is test_row_0/B:col10/1732101595192/Put/seqid=0 2024-11-20T11:19:58,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742023_1199 (size=12251) 2024-11-20T11:19:58,487 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:58,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101658486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:58,599 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=263 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/b02d1068de7a49059997c5189fdf6cbb 2024-11-20T11:19:58,611 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/593abbe9e4aa4b94a81701997f9216ac is 50, key is test_row_0/C:col10/1732101595192/Put/seqid=0 2024-11-20T11:19:58,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742024_1200 (size=12251) 2024-11-20T11:19:58,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T11:19:58,639 INFO [Thread-645 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-11-20T11:19:58,640 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:19:58,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees 2024-11-20T11:19:58,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-20T11:19:58,642 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:19:58,642 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:19:58,643 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-11-20T11:19:58,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-20T11:19:58,794 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:58,794 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-11-20T11:19:58,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:58,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. as already flushing 2024-11-20T11:19:58,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:58,795 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:58,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:58,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:58,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-20T11:19:58,947 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:58,947 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-11-20T11:19:58,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
2024-11-20T11:19:58,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. as already flushing 2024-11-20T11:19:58,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:58,948 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:19:58,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:19:58,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:19:59,022 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=263 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/593abbe9e4aa4b94a81701997f9216ac 2024-11-20T11:19:59,028 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/e83ec71bbda745e889c6e242423adb46 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/e83ec71bbda745e889c6e242423adb46 2024-11-20T11:19:59,032 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/e83ec71bbda745e889c6e242423adb46, entries=150, sequenceid=263, filesize=30.5 K 2024-11-20T11:19:59,033 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/b02d1068de7a49059997c5189fdf6cbb as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/b02d1068de7a49059997c5189fdf6cbb 2024-11-20T11:19:59,037 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/b02d1068de7a49059997c5189fdf6cbb, entries=150, sequenceid=263, filesize=12.0 K 2024-11-20T11:19:59,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/593abbe9e4aa4b94a81701997f9216ac as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/593abbe9e4aa4b94a81701997f9216ac 2024-11-20T11:19:59,043 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/593abbe9e4aa4b94a81701997f9216ac, entries=150, sequenceid=263, filesize=12.0 K 2024-11-20T11:19:59,043 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 8dd1f041f5ff83d363163edeec4cd720 in 1717ms, sequenceid=263, compaction requested=true 2024-11-20T11:19:59,044 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:59,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dd1f041f5ff83d363163edeec4cd720:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:19:59,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:59,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dd1f041f5ff83d363163edeec4cd720:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:19:59,044 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:19:59,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:59,044 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:19:59,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dd1f041f5ff83d363163edeec4cd720:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:19:59,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:19:59,045 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93927 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:59,045 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37065 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:59,045 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 8dd1f041f5ff83d363163edeec4cd720/A is initiating minor compaction (all files) 2024-11-20T11:19:59,045 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 8dd1f041f5ff83d363163edeec4cd720/B is initiating minor compaction (all files) 2024-11-20T11:19:59,045 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dd1f041f5ff83d363163edeec4cd720/A in TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:59,045 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dd1f041f5ff83d363163edeec4cd720/B in TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
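The "Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" entries above reflect store-file count thresholds. As a hedged reference, these are standard HBase configuration keys involved in those figures; the values below are examples, not necessarily the ones used in this run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compactionThreshold", 3);   // minimum files before a minor compaction is considered
    conf.setInt("hbase.hstore.compaction.max", 10);       // upper bound on files selected per compaction
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // the "16 blocking" figure: writes stall above this count
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // size ratio used by ExploringCompactionPolicy
  }
}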
2024-11-20T11:19:59,045 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/11baddf9d75c45938566508eb2bfbe01, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/5e4efb7b3baf4e6bbdec80b8bdbbf54a, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/e83ec71bbda745e889c6e242423adb46] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp, totalSize=91.7 K 2024-11-20T11:19:59,045 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/68f4b46e33774e1898219b6b75265c67, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/685c5a05cc3c420caaef6dfb86b3bf5c, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/b02d1068de7a49059997c5189fdf6cbb] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp, totalSize=36.2 K 2024-11-20T11:19:59,045 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:19:59,045 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
files: [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/11baddf9d75c45938566508eb2bfbe01, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/5e4efb7b3baf4e6bbdec80b8bdbbf54a, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/e83ec71bbda745e889c6e242423adb46] 2024-11-20T11:19:59,046 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 68f4b46e33774e1898219b6b75265c67, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1732101593251 2024-11-20T11:19:59,046 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 11baddf9d75c45938566508eb2bfbe01, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1732101593251 2024-11-20T11:19:59,046 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e4efb7b3baf4e6bbdec80b8bdbbf54a, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1732101594000 2024-11-20T11:19:59,046 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 685c5a05cc3c420caaef6dfb86b3bf5c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1732101594000 2024-11-20T11:19:59,046 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting b02d1068de7a49059997c5189fdf6cbb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1732101595192 2024-11-20T11:19:59,046 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting e83ec71bbda745e889c6e242423adb46, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1732101595192 2024-11-20T11:19:59,055 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:59,057 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dd1f041f5ff83d363163edeec4cd720#B#compaction#166 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:59,058 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/e63483191d934ea0ae54ff270f574b48 is 50, key is test_row_0/B:col10/1732101595192/Put/seqid=0 2024-11-20T11:19:59,059 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120bca8fd93739c4fba9255db80ac597c01_8dd1f041f5ff83d363163edeec4cd720 store=[table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:59,060 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120bca8fd93739c4fba9255db80ac597c01_8dd1f041f5ff83d363163edeec4cd720, store=[table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:59,061 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120bca8fd93739c4fba9255db80ac597c01_8dd1f041f5ff83d363163edeec4cd720 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:59,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742025_1201 (size=12865) 2024-11-20T11:19:59,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742026_1202 (size=4469) 2024-11-20T11:19:59,100 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:59,100 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-11-20T11:19:59,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
2024-11-20T11:19:59,101 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2837): Flushing 8dd1f041f5ff83d363163edeec4cd720 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-20T11:19:59,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=A 2024-11-20T11:19:59,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:59,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=B 2024-11-20T11:19:59,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:59,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=C 2024-11-20T11:19:59,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:19:59,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112081c1e3a6a49f45419938ebb482bb7a9c_8dd1f041f5ff83d363163edeec4cd720 is 50, key is test_row_0/A:col10/1732101597364/Put/seqid=0 2024-11-20T11:19:59,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742027_1203 (size=12454) 2024-11-20T11:19:59,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-20T11:19:59,473 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dd1f041f5ff83d363163edeec4cd720#A#compaction#165 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:59,474 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/d8d857ae983a4da096cbebc62227bde9 is 175, key is test_row_0/A:col10/1732101595192/Put/seqid=0 2024-11-20T11:19:59,476 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/e63483191d934ea0ae54ff270f574b48 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/e63483191d934ea0ae54ff270f574b48 2024-11-20T11:19:59,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742028_1204 (size=31819) 2024-11-20T11:19:59,483 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dd1f041f5ff83d363163edeec4cd720/B of 8dd1f041f5ff83d363163edeec4cd720 into e63483191d934ea0ae54ff270f574b48(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:19:59,483 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:59,483 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720., storeName=8dd1f041f5ff83d363163edeec4cd720/B, priority=13, startTime=1732101599044; duration=0sec 2024-11-20T11:19:59,484 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:19:59,484 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dd1f041f5ff83d363163edeec4cd720:B 2024-11-20T11:19:59,484 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:19:59,485 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37065 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:19:59,485 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 8dd1f041f5ff83d363163edeec4cd720/C is initiating minor compaction (all files) 2024-11-20T11:19:59,485 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dd1f041f5ff83d363163edeec4cd720/C in TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
2024-11-20T11:19:59,485 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/b00995c4e64f45f9aee2395f46612513, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/21c9b45be6c44403bef1a482bdd3049d, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/593abbe9e4aa4b94a81701997f9216ac] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp, totalSize=36.2 K 2024-11-20T11:19:59,486 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting b00995c4e64f45f9aee2395f46612513, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1732101593251 2024-11-20T11:19:59,486 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 21c9b45be6c44403bef1a482bdd3049d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1732101594000 2024-11-20T11:19:59,487 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 593abbe9e4aa4b94a81701997f9216ac, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1732101595192 2024-11-20T11:19:59,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:59,491 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. as already flushing 2024-11-20T11:19:59,497 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dd1f041f5ff83d363163edeec4cd720#C#compaction#168 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:19:59,497 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/37b54bc727144943b5a0f326c4ef3a90 is 50, key is test_row_0/C:col10/1732101595192/Put/seqid=0 2024-11-20T11:19:59,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742029_1205 (size=12865) 2024-11-20T11:19:59,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:19:59,519 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112081c1e3a6a49f45419938ebb482bb7a9c_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112081c1e3a6a49f45419938ebb482bb7a9c_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:19:59,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/d9a0751cf0a04d71bbb94f8ef03e7120, store: [table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:19:59,521 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/37b54bc727144943b5a0f326c4ef3a90 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/37b54bc727144943b5a0f326c4ef3a90 2024-11-20T11:19:59,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/d9a0751cf0a04d71bbb94f8ef03e7120 is 175, key is test_row_0/A:col10/1732101597364/Put/seqid=0 2024-11-20T11:19:59,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742030_1206 (size=31255) 2024-11-20T11:19:59,526 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:59,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101659523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:59,527 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dd1f041f5ff83d363163edeec4cd720/C of 8dd1f041f5ff83d363163edeec4cd720 into 37b54bc727144943b5a0f326c4ef3a90(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:19:59,527 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:59,527 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720., storeName=8dd1f041f5ff83d363163edeec4cd720/C, priority=13, startTime=1732101599044; duration=0sec 2024-11-20T11:19:59,527 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:59,527 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dd1f041f5ff83d363163edeec4cd720:C 2024-11-20T11:19:59,628 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:59,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101659627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:59,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-20T11:19:59,764 DEBUG [Thread-648 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x517ff977 to 127.0.0.1:62733 2024-11-20T11:19:59,764 DEBUG [Thread-648 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:19:59,765 DEBUG [Thread-646 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x247c0c93 to 127.0.0.1:62733 2024-11-20T11:19:59,765 DEBUG [Thread-646 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:19:59,765 DEBUG [Thread-650 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3448d233 to 127.0.0.1:62733 2024-11-20T11:19:59,765 DEBUG [Thread-650 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:19:59,765 DEBUG [Thread-652 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7a11164b to 127.0.0.1:62733 2024-11-20T11:19:59,765 DEBUG [Thread-652 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:19:59,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:19:59,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101659829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:19:59,884 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/d8d857ae983a4da096cbebc62227bde9 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/d8d857ae983a4da096cbebc62227bde9 2024-11-20T11:19:59,889 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dd1f041f5ff83d363163edeec4cd720/A of 8dd1f041f5ff83d363163edeec4cd720 into d8d857ae983a4da096cbebc62227bde9(size=31.1 K), total size for store is 31.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
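The repeated RegionTooBusyException entries in this stretch of the log show writes being rejected while the region's memstore is above its blocking limit (512.0 K here); the client retries the rejected mutations, which is why the same connection reappears with increasing callIds. A minimal sketch of the settings that determine that limit, with assumed example values rather than this test's actual configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Blocking limit = flush size * block multiplier; 128 KB * 4 = 512 KB in this example.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // Client-side retry behaviour for the rejected mutations seen above.
    conf.setInt("hbase.client.retries.number", 15);
    conf.setLong("hbase.client.pause", 100); // base pause between retries, in milliseconds
  }
}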
2024-11-20T11:19:59,889 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:19:59,889 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720., storeName=8dd1f041f5ff83d363163edeec4cd720/A, priority=13, startTime=1732101599044; duration=0sec 2024-11-20T11:19:59,889 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:19:59,889 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dd1f041f5ff83d363163edeec4cd720:A 2024-11-20T11:19:59,926 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=282, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/d9a0751cf0a04d71bbb94f8ef03e7120 2024-11-20T11:19:59,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/d4b7d02b2d0e4e02bf9dbd1754db5e22 is 50, key is test_row_0/B:col10/1732101597364/Put/seqid=0 2024-11-20T11:19:59,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742031_1207 (size=12301) 2024-11-20T11:20:00,132 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:00,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101660131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:00,337 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/d4b7d02b2d0e4e02bf9dbd1754db5e22 2024-11-20T11:20:00,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/9d9f3d9e8b6a4cf98f49063242bdf492 is 50, key is test_row_0/C:col10/1732101597364/Put/seqid=0 2024-11-20T11:20:00,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742032_1208 (size=12301) 2024-11-20T11:20:00,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:00,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54624 deadline: 1732101660635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:00,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-20T11:20:00,748 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/9d9f3d9e8b6a4cf98f49063242bdf492 2024-11-20T11:20:00,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/d9a0751cf0a04d71bbb94f8ef03e7120 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/d9a0751cf0a04d71bbb94f8ef03e7120 2024-11-20T11:20:00,757 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/d9a0751cf0a04d71bbb94f8ef03e7120, entries=150, sequenceid=282, filesize=30.5 K 2024-11-20T11:20:00,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/d4b7d02b2d0e4e02bf9dbd1754db5e22 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/d4b7d02b2d0e4e02bf9dbd1754db5e22 2024-11-20T11:20:00,761 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/d4b7d02b2d0e4e02bf9dbd1754db5e22, entries=150, sequenceid=282, filesize=12.0 K 2024-11-20T11:20:00,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/9d9f3d9e8b6a4cf98f49063242bdf492 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/9d9f3d9e8b6a4cf98f49063242bdf492 2024-11-20T11:20:00,765 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/9d9f3d9e8b6a4cf98f49063242bdf492, entries=150, sequenceid=282, filesize=12.0 K 2024-11-20T11:20:00,766 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 8dd1f041f5ff83d363163edeec4cd720 in 1665ms, sequenceid=282, compaction requested=false 2024-11-20T11:20:00,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2538): Flush status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:20:00,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:20:00,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=66 2024-11-20T11:20:00,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=66 2024-11-20T11:20:00,768 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-11-20T11:20:00,768 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1240 sec 2024-11-20T11:20:00,769 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees in 2.1290 sec 2024-11-20T11:20:01,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:20:01,385 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8dd1f041f5ff83d363163edeec4cd720 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-20T11:20:01,385 DEBUG [Thread-643 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0a4c53ed to 127.0.0.1:62733 2024-11-20T11:20:01,385 DEBUG [Thread-643 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:20:01,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=A 2024-11-20T11:20:01,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:01,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, 
store=B 2024-11-20T11:20:01,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:01,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=C 2024-11-20T11:20:01,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:01,389 DEBUG [Thread-639 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0b44b1e5 to 127.0.0.1:62733 2024-11-20T11:20:01,389 DEBUG [Thread-639 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:20:01,392 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203de1730e008445008d237257f2b823a2_8dd1f041f5ff83d363163edeec4cd720 is 50, key is test_row_0/A:col10/1732101599499/Put/seqid=0 2024-11-20T11:20:01,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742033_1209 (size=12454) 2024-11-20T11:20:01,402 DEBUG [Thread-635 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7cae6c5c to 127.0.0.1:62733 2024-11-20T11:20:01,402 DEBUG [Thread-635 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:20:01,402 DEBUG [Thread-641 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x42e904d8 to 127.0.0.1:62733 2024-11-20T11:20:01,402 DEBUG [Thread-641 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:20:01,638 DEBUG [Thread-637 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5c820ef9 to 127.0.0.1:62733 2024-11-20T11:20:01,638 DEBUG [Thread-637 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:20:01,796 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:20:01,800 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203de1730e008445008d237257f2b823a2_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203de1730e008445008d237257f2b823a2_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:20:01,801 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/041a6263e8cd48cd9f825210fb0c7c5a, store: [table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:20:01,802 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/041a6263e8cd48cd9f825210fb0c7c5a is 175, key is test_row_0/A:col10/1732101599499/Put/seqid=0 2024-11-20T11:20:01,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38643 is added to blk_1073742034_1210 (size=31255) 2024-11-20T11:20:02,206 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=303, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/041a6263e8cd48cd9f825210fb0c7c5a 2024-11-20T11:20:02,213 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/c0d494e4f9a84d8e9f326e3dd91a29f2 is 50, key is test_row_0/B:col10/1732101599499/Put/seqid=0 2024-11-20T11:20:02,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742035_1211 (size=12301) 2024-11-20T11:20:02,618 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/c0d494e4f9a84d8e9f326e3dd91a29f2 2024-11-20T11:20:02,625 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/511100b33e4e4c42b5c8175d4dba0b82 is 50, key is test_row_0/C:col10/1732101599499/Put/seqid=0 2024-11-20T11:20:02,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742036_1212 (size=12301) 2024-11-20T11:20:02,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-20T11:20:02,746 INFO [Thread-645 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 65 completed 2024-11-20T11:20:02,747 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-20T11:20:02,747 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 35 2024-11-20T11:20:02,747 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 86 2024-11-20T11:20:02,747 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 39 2024-11-20T11:20:02,747 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 41 2024-11-20T11:20:02,747 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 39 2024-11-20T11:20:02,747 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T11:20:02,747 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7342 2024-11-20T11:20:02,747 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7328 2024-11-20T11:20:02,747 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T11:20:02,747 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3141 2024-11-20T11:20:02,747 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9422 rows 2024-11-20T11:20:02,747 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3146 2024-11-20T11:20:02,747 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9437 rows 2024-11-20T11:20:02,747 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T11:20:02,747 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7a9b9802 to 127.0.0.1:62733 2024-11-20T11:20:02,747 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:20:02,749 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T11:20:02,749 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T11:20:02,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T11:20:02,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T11:20:02,754 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732101602753"}]},"ts":"1732101602753"} 2024-11-20T11:20:02,755 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T11:20:02,761 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T11:20:02,762 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T11:20:02,763 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8dd1f041f5ff83d363163edeec4cd720, UNASSIGN}] 2024-11-20T11:20:02,763 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=8dd1f041f5ff83d363163edeec4cd720, UNASSIGN 2024-11-20T11:20:02,764 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=8dd1f041f5ff83d363163edeec4cd720, regionState=CLOSING, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:02,765 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T11:20:02,765 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; CloseRegionProcedure 8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666}] 2024-11-20T11:20:02,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T11:20:02,916 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:02,916 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] handler.UnassignRegionHandler(124): Close 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:20:02,916 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T11:20:02,916 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1681): Closing 8dd1f041f5ff83d363163edeec4cd720, disabling compactions & flushes 2024-11-20T11:20:02,916 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
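The pid=67 through pid=70 entries above trace the disable path: DisableTableProcedure schedules CloseTableRegionsProcedure, which unassigns the region through TransitRegionStateProcedure and CloseRegionProcedure, and the region server then closes 8dd1f041f5ff83d363163edeec4cd720, waiting for compactions and the in-flight cache flush first. For reference, a minimal client-side sketch of the calls that start this sequence during test teardown; the deleteTable step is an assumption and does not appear in this log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table); // master runs DisableTableProcedure and closes the table's regions
      }
      admin.deleteTable(table);    // assumed cleanup step; requires the table to be disabled first
    }
  }
}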
2024-11-20T11:20:03,029 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/511100b33e4e4c42b5c8175d4dba0b82 2024-11-20T11:20:03,034 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/041a6263e8cd48cd9f825210fb0c7c5a as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/041a6263e8cd48cd9f825210fb0c7c5a 2024-11-20T11:20:03,038 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/041a6263e8cd48cd9f825210fb0c7c5a, entries=150, sequenceid=303, filesize=30.5 K 2024-11-20T11:20:03,039 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/c0d494e4f9a84d8e9f326e3dd91a29f2 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/c0d494e4f9a84d8e9f326e3dd91a29f2 2024-11-20T11:20:03,043 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/c0d494e4f9a84d8e9f326e3dd91a29f2, entries=150, sequenceid=303, filesize=12.0 K 2024-11-20T11:20:03,044 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/511100b33e4e4c42b5c8175d4dba0b82 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/511100b33e4e4c42b5c8175d4dba0b82 2024-11-20T11:20:03,047 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/511100b33e4e4c42b5c8175d4dba0b82, entries=150, sequenceid=303, filesize=12.0 K 2024-11-20T11:20:03,048 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=26.84 KB/27480 for 8dd1f041f5ff83d363163edeec4cd720 in 1663ms, sequenceid=303, compaction requested=true 2024-11-20T11:20:03,048 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:20:03,048 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
2024-11-20T11:20:03,048 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:20:03,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dd1f041f5ff83d363163edeec4cd720:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:20:03,048 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. after waiting 0 ms 2024-11-20T11:20:03,048 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. because compaction request was cancelled 2024-11-20T11:20:03,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:03,048 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 2024-11-20T11:20:03,048 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dd1f041f5ff83d363163edeec4cd720:A 2024-11-20T11:20:03,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dd1f041f5ff83d363163edeec4cd720:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:20:03,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:03,048 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. because compaction request was cancelled 2024-11-20T11:20:03,048 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dd1f041f5ff83d363163edeec4cd720:B 2024-11-20T11:20:03,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dd1f041f5ff83d363163edeec4cd720:C, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:20:03,048 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
because compaction request was cancelled 2024-11-20T11:20:03,048 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(2837): Flushing 8dd1f041f5ff83d363163edeec4cd720 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-20T11:20:03,048 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dd1f041f5ff83d363163edeec4cd720:C 2024-11-20T11:20:03,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:03,048 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=A 2024-11-20T11:20:03,048 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:03,048 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=B 2024-11-20T11:20:03,048 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:03,048 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dd1f041f5ff83d363163edeec4cd720, store=C 2024-11-20T11:20:03,048 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:03,054 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120db98605e3aad4a7296cd5a9083749cbb_8dd1f041f5ff83d363163edeec4cd720 is 50, key is test_row_0/A:col10/1732101601401/Put/seqid=0 2024-11-20T11:20:03,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T11:20:03,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742037_1213 (size=12454) 2024-11-20T11:20:03,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T11:20:03,459 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:20:03,463 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120db98605e3aad4a7296cd5a9083749cbb_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120db98605e3aad4a7296cd5a9083749cbb_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:20:03,464 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/69b5eb3cc7374f4ab7f4b24491b0897a, store: [table=TestAcidGuarantees family=A region=8dd1f041f5ff83d363163edeec4cd720] 2024-11-20T11:20:03,464 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/69b5eb3cc7374f4ab7f4b24491b0897a is 175, key is test_row_0/A:col10/1732101601401/Put/seqid=0 2024-11-20T11:20:03,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742038_1214 (size=31255) 2024-11-20T11:20:03,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T11:20:03,869 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=310, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/69b5eb3cc7374f4ab7f4b24491b0897a 2024-11-20T11:20:03,875 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/c45bfa0779824c37904c4fbd231e49b3 is 50, key is test_row_0/B:col10/1732101601401/Put/seqid=0 2024-11-20T11:20:03,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742039_1215 (size=12301) 2024-11-20T11:20:04,280 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/c45bfa0779824c37904c4fbd231e49b3 2024-11-20T11:20:04,287 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/09c4d4d81dcd40d48b1b5f43311a6b34 is 50, key is test_row_0/C:col10/1732101601401/Put/seqid=0 2024-11-20T11:20:04,290 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742040_1216 (size=12301) 2024-11-20T11:20:04,691 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/09c4d4d81dcd40d48b1b5f43311a6b34 2024-11-20T11:20:04,696 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/A/69b5eb3cc7374f4ab7f4b24491b0897a as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/69b5eb3cc7374f4ab7f4b24491b0897a 2024-11-20T11:20:04,700 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/69b5eb3cc7374f4ab7f4b24491b0897a, entries=150, sequenceid=310, filesize=30.5 K 2024-11-20T11:20:04,700 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/B/c45bfa0779824c37904c4fbd231e49b3 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/c45bfa0779824c37904c4fbd231e49b3 2024-11-20T11:20:04,704 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/c45bfa0779824c37904c4fbd231e49b3, entries=150, sequenceid=310, filesize=12.0 K 2024-11-20T11:20:04,705 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/.tmp/C/09c4d4d81dcd40d48b1b5f43311a6b34 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/09c4d4d81dcd40d48b1b5f43311a6b34 2024-11-20T11:20:04,709 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/09c4d4d81dcd40d48b1b5f43311a6b34, entries=150, sequenceid=310, filesize=12.0 K 2024-11-20T11:20:04,710 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 
KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 8dd1f041f5ff83d363163edeec4cd720 in 1662ms, sequenceid=310, compaction requested=true 2024-11-20T11:20:04,711 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/e956edf7d8a5449a8b1cddb021caebc9, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/1123abe981d6444f855e5e68cc364054, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/561da633798f4b67a54f493e82216a9c, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/9ce419c2a2144ee38cbfc1981896cf9f, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/4a57a0176ba745caa9150305279d6520, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/ecb333ba38bf454fae33ab0133cec11e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/33da7bdcac604f1f9b6cd8e7413a13d7, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/b1aacd4bc83f49af8962f060882379cb, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/57da6258d45c46a99dbdbd6280b64cc1, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/af60f3f941e04d1cb207e470d04c76e6, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/3772ab466268497f88f38aef4d8ac682, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/3aeb0c3e2bc2400691be53d239f41338, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/effd967352dd477b8e55715c0ceb6c59, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/0a9b66f7af844e62b19c13b88261c6c9, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/f37359dcbc9f445393537601f8faebce, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/11baddf9d75c45938566508eb2bfbe01, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/5e4efb7b3baf4e6bbdec80b8bdbbf54a, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/e83ec71bbda745e889c6e242423adb46] to archive 2024-11-20T11:20:04,712 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T11:20:04,713 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/e956edf7d8a5449a8b1cddb021caebc9 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/e956edf7d8a5449a8b1cddb021caebc9 2024-11-20T11:20:04,714 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/1123abe981d6444f855e5e68cc364054 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/1123abe981d6444f855e5e68cc364054 2024-11-20T11:20:04,715 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/561da633798f4b67a54f493e82216a9c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/561da633798f4b67a54f493e82216a9c 2024-11-20T11:20:04,716 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/9ce419c2a2144ee38cbfc1981896cf9f to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/9ce419c2a2144ee38cbfc1981896cf9f 2024-11-20T11:20:04,717 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/4a57a0176ba745caa9150305279d6520 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/4a57a0176ba745caa9150305279d6520 2024-11-20T11:20:04,718 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/ecb333ba38bf454fae33ab0133cec11e to 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/ecb333ba38bf454fae33ab0133cec11e 2024-11-20T11:20:04,719 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/33da7bdcac604f1f9b6cd8e7413a13d7 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/33da7bdcac604f1f9b6cd8e7413a13d7 2024-11-20T11:20:04,721 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/b1aacd4bc83f49af8962f060882379cb to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/b1aacd4bc83f49af8962f060882379cb 2024-11-20T11:20:04,722 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/57da6258d45c46a99dbdbd6280b64cc1 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/57da6258d45c46a99dbdbd6280b64cc1 2024-11-20T11:20:04,723 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/af60f3f941e04d1cb207e470d04c76e6 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/af60f3f941e04d1cb207e470d04c76e6 2024-11-20T11:20:04,724 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/3772ab466268497f88f38aef4d8ac682 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/3772ab466268497f88f38aef4d8ac682 2024-11-20T11:20:04,725 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/3aeb0c3e2bc2400691be53d239f41338 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/3aeb0c3e2bc2400691be53d239f41338 2024-11-20T11:20:04,726 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/effd967352dd477b8e55715c0ceb6c59 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/effd967352dd477b8e55715c0ceb6c59 2024-11-20T11:20:04,727 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/0a9b66f7af844e62b19c13b88261c6c9 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/0a9b66f7af844e62b19c13b88261c6c9 2024-11-20T11:20:04,728 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/f37359dcbc9f445393537601f8faebce to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/f37359dcbc9f445393537601f8faebce 2024-11-20T11:20:04,729 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/11baddf9d75c45938566508eb2bfbe01 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/11baddf9d75c45938566508eb2bfbe01 2024-11-20T11:20:04,730 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/5e4efb7b3baf4e6bbdec80b8bdbbf54a to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/5e4efb7b3baf4e6bbdec80b8bdbbf54a 2024-11-20T11:20:04,731 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/e83ec71bbda745e889c6e242423adb46 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/e83ec71bbda745e889c6e242423adb46 2024-11-20T11:20:04,732 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/adc9386efdb14a86b0555a1bd05aa396, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/85b1b37f41b74af584463f6ba6cbb0e6, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/c6c2f0be4eed45f8aea82fa8f5e7e394, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/5f2e6fb8676a4a9bbec526def9928c36, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/902cbe220911469c893eebe9d8b232f1, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/e454e56b2aeb439d903173a3d52c685e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/480c8f02e3ee48bf8977e15d57c3f6dd, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/76c0a93e640f40f5b64c1dafbc066a05, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/bb7e944d50014f92bbf0a5e7fef53f30, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/303329d728dd4bd5a52fbf826bb5e939, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/1c3bd6dafa674bdcab38ecb9982703ed, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/7d3f6add7ca74127b52f800ceb51b67c, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/524d8cb3f93e4506a12a838e471acea5, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/0a6aeb737f3a4f2e9d04c37c9e4a893c, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/68f4b46e33774e1898219b6b75265c67, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/691875146ac9467b9b6beb032caf9f4c, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/685c5a05cc3c420caaef6dfb86b3bf5c, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/b02d1068de7a49059997c5189fdf6cbb] to archive 2024-11-20T11:20:04,733 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T11:20:04,734 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/adc9386efdb14a86b0555a1bd05aa396 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/adc9386efdb14a86b0555a1bd05aa396 2024-11-20T11:20:04,735 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/85b1b37f41b74af584463f6ba6cbb0e6 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/85b1b37f41b74af584463f6ba6cbb0e6 2024-11-20T11:20:04,736 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/c6c2f0be4eed45f8aea82fa8f5e7e394 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/c6c2f0be4eed45f8aea82fa8f5e7e394 2024-11-20T11:20:04,737 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/5f2e6fb8676a4a9bbec526def9928c36 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/5f2e6fb8676a4a9bbec526def9928c36 2024-11-20T11:20:04,737 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/902cbe220911469c893eebe9d8b232f1 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/902cbe220911469c893eebe9d8b232f1 2024-11-20T11:20:04,738 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/e454e56b2aeb439d903173a3d52c685e to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/e454e56b2aeb439d903173a3d52c685e 2024-11-20T11:20:04,739 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/480c8f02e3ee48bf8977e15d57c3f6dd to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/480c8f02e3ee48bf8977e15d57c3f6dd 2024-11-20T11:20:04,740 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/76c0a93e640f40f5b64c1dafbc066a05 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/76c0a93e640f40f5b64c1dafbc066a05 2024-11-20T11:20:04,741 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/bb7e944d50014f92bbf0a5e7fef53f30 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/bb7e944d50014f92bbf0a5e7fef53f30 2024-11-20T11:20:04,741 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/303329d728dd4bd5a52fbf826bb5e939 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/303329d728dd4bd5a52fbf826bb5e939 2024-11-20T11:20:04,742 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/1c3bd6dafa674bdcab38ecb9982703ed to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/1c3bd6dafa674bdcab38ecb9982703ed 2024-11-20T11:20:04,743 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/7d3f6add7ca74127b52f800ceb51b67c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/7d3f6add7ca74127b52f800ceb51b67c 2024-11-20T11:20:04,744 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/524d8cb3f93e4506a12a838e471acea5 to 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/524d8cb3f93e4506a12a838e471acea5 2024-11-20T11:20:04,745 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/0a6aeb737f3a4f2e9d04c37c9e4a893c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/0a6aeb737f3a4f2e9d04c37c9e4a893c 2024-11-20T11:20:04,746 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/68f4b46e33774e1898219b6b75265c67 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/68f4b46e33774e1898219b6b75265c67 2024-11-20T11:20:04,747 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/691875146ac9467b9b6beb032caf9f4c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/691875146ac9467b9b6beb032caf9f4c 2024-11-20T11:20:04,748 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/685c5a05cc3c420caaef6dfb86b3bf5c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/685c5a05cc3c420caaef6dfb86b3bf5c 2024-11-20T11:20:04,749 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/b02d1068de7a49059997c5189fdf6cbb to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/b02d1068de7a49059997c5189fdf6cbb 2024-11-20T11:20:04,750 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/000ef89b88f0435ea6ddeadeb3e24b58, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/8eba5d8880ea41c4a929c3c6d21794d9, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/2bc86f637bd84bc38a87c3886bcef2cd, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/5dda63f6774c47b6a8af8d0e3be1f579, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/326de11523dc4d45b88f5c2e8817a25d, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/980e839be2bc4973a781b4d56d69dd7e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/31ee1cbc4ddb4e438f60e0f0462eacbd, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/8102f2fbe71343d2a883f1df402668f8, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/d79e2a86fdff45758814dcdda66a7a01, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/0e17194387c34942a802fce4fd04858d, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/fd9d949dfb2741e1b910a3cf8b249a33, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/1f28ba679858450293f59bfaf3f00a9d, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/54b6f1aba74a4454a56f75cd3214f3b8, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/0b2434c027d049d08a22f2944f857ada, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/b00995c4e64f45f9aee2395f46612513, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/847a725825624625afc02b19450d7747, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/21c9b45be6c44403bef1a482bdd3049d, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/593abbe9e4aa4b94a81701997f9216ac] to archive 2024-11-20T11:20:04,751 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T11:20:04,752 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/000ef89b88f0435ea6ddeadeb3e24b58 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/000ef89b88f0435ea6ddeadeb3e24b58 2024-11-20T11:20:04,753 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/8eba5d8880ea41c4a929c3c6d21794d9 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/8eba5d8880ea41c4a929c3c6d21794d9 2024-11-20T11:20:04,754 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/2bc86f637bd84bc38a87c3886bcef2cd to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/2bc86f637bd84bc38a87c3886bcef2cd 2024-11-20T11:20:04,755 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/5dda63f6774c47b6a8af8d0e3be1f579 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/5dda63f6774c47b6a8af8d0e3be1f579 2024-11-20T11:20:04,756 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/326de11523dc4d45b88f5c2e8817a25d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/326de11523dc4d45b88f5c2e8817a25d 2024-11-20T11:20:04,757 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/980e839be2bc4973a781b4d56d69dd7e to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/980e839be2bc4973a781b4d56d69dd7e 2024-11-20T11:20:04,758 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/31ee1cbc4ddb4e438f60e0f0462eacbd to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/31ee1cbc4ddb4e438f60e0f0462eacbd 2024-11-20T11:20:04,759 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/8102f2fbe71343d2a883f1df402668f8 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/8102f2fbe71343d2a883f1df402668f8 2024-11-20T11:20:04,760 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/d79e2a86fdff45758814dcdda66a7a01 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/d79e2a86fdff45758814dcdda66a7a01 2024-11-20T11:20:04,761 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/0e17194387c34942a802fce4fd04858d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/0e17194387c34942a802fce4fd04858d 2024-11-20T11:20:04,762 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/fd9d949dfb2741e1b910a3cf8b249a33 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/fd9d949dfb2741e1b910a3cf8b249a33 2024-11-20T11:20:04,763 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/1f28ba679858450293f59bfaf3f00a9d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/1f28ba679858450293f59bfaf3f00a9d 2024-11-20T11:20:04,764 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/54b6f1aba74a4454a56f75cd3214f3b8 to 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/54b6f1aba74a4454a56f75cd3214f3b8 2024-11-20T11:20:04,765 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/0b2434c027d049d08a22f2944f857ada to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/0b2434c027d049d08a22f2944f857ada 2024-11-20T11:20:04,766 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/b00995c4e64f45f9aee2395f46612513 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/b00995c4e64f45f9aee2395f46612513 2024-11-20T11:20:04,767 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/847a725825624625afc02b19450d7747 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/847a725825624625afc02b19450d7747 2024-11-20T11:20:04,767 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/21c9b45be6c44403bef1a482bdd3049d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/21c9b45be6c44403bef1a482bdd3049d 2024-11-20T11:20:04,768 DEBUG [StoreCloser-TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/593abbe9e4aa4b94a81701997f9216ac to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/593abbe9e4aa4b94a81701997f9216ac 2024-11-20T11:20:04,772 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/recovered.edits/313.seqid, newMaxSeqId=313, maxSeqId=4 2024-11-20T11:20:04,773 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720. 
2024-11-20T11:20:04,773 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1635): Region close journal for 8dd1f041f5ff83d363163edeec4cd720: 2024-11-20T11:20:04,774 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] handler.UnassignRegionHandler(170): Closed 8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:20:04,775 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=8dd1f041f5ff83d363163edeec4cd720, regionState=CLOSED 2024-11-20T11:20:04,777 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-11-20T11:20:04,777 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; CloseRegionProcedure 8dd1f041f5ff83d363163edeec4cd720, server=ee8338ed7cc0,35185,1732101546666 in 2.0110 sec 2024-11-20T11:20:04,778 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=69, resume processing ppid=68 2024-11-20T11:20:04,778 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, ppid=68, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=8dd1f041f5ff83d363163edeec4cd720, UNASSIGN in 2.0140 sec 2024-11-20T11:20:04,779 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-11-20T11:20:04,779 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.0160 sec 2024-11-20T11:20:04,780 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732101604780"}]},"ts":"1732101604780"} 2024-11-20T11:20:04,781 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T11:20:04,784 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T11:20:04,785 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.0350 sec 2024-11-20T11:20:04,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T11:20:04,857 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-11-20T11:20:04,858 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T11:20:04,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:20:04,859 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=71, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:20:04,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T11:20:04,860 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for 
pid=71, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:20:04,862 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:20:04,865 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A, FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B, FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C, FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/recovered.edits] 2024-11-20T11:20:04,867 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/041a6263e8cd48cd9f825210fb0c7c5a to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/041a6263e8cd48cd9f825210fb0c7c5a 2024-11-20T11:20:04,869 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/69b5eb3cc7374f4ab7f4b24491b0897a to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/69b5eb3cc7374f4ab7f4b24491b0897a 2024-11-20T11:20:04,870 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/d8d857ae983a4da096cbebc62227bde9 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/d8d857ae983a4da096cbebc62227bde9 2024-11-20T11:20:04,871 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/d9a0751cf0a04d71bbb94f8ef03e7120 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/A/d9a0751cf0a04d71bbb94f8ef03e7120 2024-11-20T11:20:04,874 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/c0d494e4f9a84d8e9f326e3dd91a29f2 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/c0d494e4f9a84d8e9f326e3dd91a29f2 2024-11-20T11:20:04,875 
DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/c45bfa0779824c37904c4fbd231e49b3 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/c45bfa0779824c37904c4fbd231e49b3 2024-11-20T11:20:04,876 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/d4b7d02b2d0e4e02bf9dbd1754db5e22 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/d4b7d02b2d0e4e02bf9dbd1754db5e22 2024-11-20T11:20:04,877 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/e63483191d934ea0ae54ff270f574b48 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/B/e63483191d934ea0ae54ff270f574b48 2024-11-20T11:20:04,880 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/09c4d4d81dcd40d48b1b5f43311a6b34 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/09c4d4d81dcd40d48b1b5f43311a6b34 2024-11-20T11:20:04,881 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/37b54bc727144943b5a0f326c4ef3a90 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/37b54bc727144943b5a0f326c4ef3a90 2024-11-20T11:20:04,882 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/511100b33e4e4c42b5c8175d4dba0b82 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/511100b33e4e4c42b5c8175d4dba0b82 2024-11-20T11:20:04,884 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/9d9f3d9e8b6a4cf98f49063242bdf492 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/C/9d9f3d9e8b6a4cf98f49063242bdf492 2024-11-20T11:20:04,887 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/recovered.edits/313.seqid to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720/recovered.edits/313.seqid 2024-11-20T11:20:04,887 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:20:04,887 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T11:20:04,888 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T11:20:04,889 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-20T11:20:04,894 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112004a9511653374ffe8e65b95f26d49dc5_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112004a9511653374ffe8e65b95f26d49dc5_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:20:04,895 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201ce53d5e494343049cf052b5d6908518_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201ce53d5e494343049cf052b5d6908518_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:20:04,897 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203de1730e008445008d237257f2b823a2_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203de1730e008445008d237257f2b823a2_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:20:04,898 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120446f304b451c4959a17531b923b37eff_8dd1f041f5ff83d363163edeec4cd720 to 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120446f304b451c4959a17531b923b37eff_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:20:04,898 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120482084ec32194f9b9c66e0c023246362_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120482084ec32194f9b9c66e0c023246362_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:20:04,899 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204e505d2e93f64f02b613167d7659201e_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204e505d2e93f64f02b613167d7659201e_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:20:04,900 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112055d3e8a45266494e8f71168146e2356c_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112055d3e8a45266494e8f71168146e2356c_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:20:04,902 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207f783f9b1b224f0d851b702d5d467066_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207f783f9b1b224f0d851b702d5d467066_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:20:04,903 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112081c1e3a6a49f45419938ebb482bb7a9c_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112081c1e3a6a49f45419938ebb482bb7a9c_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:20:04,904 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120abc157ea001c4e329c5eae06ba92ba23_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120abc157ea001c4e329c5eae06ba92ba23_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:20:04,906 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c9b601d3376f47ebb8cbe9ad0d34fb55_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c9b601d3376f47ebb8cbe9ad0d34fb55_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:20:04,907 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d0121a1077af43e494bc7d9c09745317_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d0121a1077af43e494bc7d9c09745317_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:20:04,909 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d2347eeafab54f2f906fc4d72dafc55f_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d2347eeafab54f2f906fc4d72dafc55f_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:20:04,910 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120db98605e3aad4a7296cd5a9083749cbb_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120db98605e3aad4a7296cd5a9083749cbb_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:20:04,911 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ec149889ad824605bdd894b410191984_8dd1f041f5ff83d363163edeec4cd720 to 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ec149889ad824605bdd894b410191984_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:20:04,913 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ed89989888554dc994ac400a20f31a6f_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ed89989888554dc994ac400a20f31a6f_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:20:04,914 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120efb3cc56fc97425da0bff447718a395d_8dd1f041f5ff83d363163edeec4cd720 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120efb3cc56fc97425da0bff447718a395d_8dd1f041f5ff83d363163edeec4cd720 2024-11-20T11:20:04,915 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T11:20:04,917 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=71, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:20:04,920 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T11:20:04,922 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T11:20:04,923 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=71, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:20:04,923 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T11:20:04,923 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732101604923"}]},"ts":"9223372036854775807"} 2024-11-20T11:20:04,925 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T11:20:04,925 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 8dd1f041f5ff83d363163edeec4cd720, NAME => 'TestAcidGuarantees,,1732101576637.8dd1f041f5ff83d363163edeec4cd720.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T11:20:04,925 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
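The DeleteTableProcedure records above (clearing the filesystem layout, archiving the MOB files, removing the region and table state from hbase:meta) are all driven by two client calls: disable, then delete. A hedged sketch of that client side using the standard HBase 2.x Admin API, assuming a reachable cluster configuration; the actual test harness code may differ:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName name = TableName.valueOf("TestAcidGuarantees");
          if (admin.tableExists(name)) {
            if (admin.isTableEnabled(name)) {
              admin.disableTable(name);   // drives a DisableTableProcedure (pid=67 above)
            }
            admin.deleteTable(name);      // drives a DeleteTableProcedure (pid=71 above)
          }
        }
      }
    }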
2024-11-20T11:20:04,926 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732101604925"}]},"ts":"9223372036854775807"} 2024-11-20T11:20:04,927 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T11:20:04,930 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=71, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:20:04,930 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 72 msec 2024-11-20T11:20:04,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T11:20:04,960 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-11-20T11:20:04,970 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=242 (was 240) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1811910228_22 at /127.0.0.1:38262 [Waiting for operation #322] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6a7920e2-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_669624712_22 at /127.0.0.1:38260 [Waiting for operation #320] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_669624712_22 at /127.0.0.1:48038 [Waiting for operation #712] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
hconnection-0x6a7920e2-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1811910228_22 at /127.0.0.1:40816 [Waiting for operation #465] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/cluster_e3ab1e07-46f1-e4fd-8280-4ad2ac7fbc56/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/cluster_e3ab1e07-46f1-e4fd-8280-4ad2ac7fbc56/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
hconnection-0x6a7920e2-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6a7920e2-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=462 (was 459) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=327 (was 284) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=5871 (was 6028) 2024-11-20T11:20:04,978 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=242, OpenFileDescriptor=462, MaxFileDescriptor=1048576, SystemLoadAverage=327, ProcessCount=11, AvailableMemoryMB=5871 2024-11-20T11:20:04,979 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
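The ResourceChecker summary above compares thread count, open file descriptors, load average, and memory before and after the test to flag leaks. A rough, illustrative way to take the same kind of snapshot from plain JMX (not the ResourceChecker implementation itself, just an assumption about how such numbers can be sampled):

    import java.lang.management.ManagementFactory;
    import java.lang.management.OperatingSystemMXBean;

    public class ResourceSnapshot {
      public static void main(String[] args) {
        // Live thread count, as reported in "Thread=242 (was 240)" style summaries.
        int threads = Thread.getAllStackTraces().size();
        OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean();
        long openFds = -1;
        if (os instanceof com.sun.management.UnixOperatingSystemMXBean) {
          openFds = ((com.sun.management.UnixOperatingSystemMXBean) os)
              .getOpenFileDescriptorCount();
        }
        System.out.println("Thread=" + threads + ", OpenFileDescriptor=" + openFds
            + ", SystemLoadAverage=" + os.getSystemLoadAverage());
      }
    }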
2024-11-20T11:20:04,980 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T11:20:04,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=72, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T11:20:04,981 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T11:20:04,981 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:20:04,982 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 72 2024-11-20T11:20:04,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-11-20T11:20:04,982 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T11:20:04,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742041_1217 (size=960) 2024-11-20T11:20:05,055 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
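The HMaster create log above spells out the schema the next test case asks for: three column families A, B, and C with one version each, plus the table attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC'. A hedged sketch of building and submitting such a descriptor with the HBase 2.x client API; the small flush size that provoked the TableDescriptorChecker warning is included for illustration, and the real test code may set these values differently:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                  // Table attribute seen in the create log: BASIC in-memory compaction.
                  .setValue("hbase.hregion.compacting.memstore.type", "BASIC")
                  // 128 KB flush size; small enough to trigger the sanity-check warning.
                  .setMemStoreFlushSize(128 * 1024);
          for (String family : new String[] { "A", "B", "C" }) {
            table.setColumnFamily(
                ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)
                    .build());
          }
          admin.createTable(table.build());   // drives a CreateTableProcedure (pid=72 above)
        }
      }
    }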
2024-11-20T11:20:05,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-11-20T11:20:05,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-11-20T11:20:05,390 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830 2024-11-20T11:20:05,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742042_1218 (size=53) 2024-11-20T11:20:05,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-11-20T11:20:05,796 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T11:20:05,796 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing dda426a558461ab734d5c9192624badc, disabling compactions & flushes 2024-11-20T11:20:05,796 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:05,796 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:05,796 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. after waiting 0 ms 2024-11-20T11:20:05,796 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 
2024-11-20T11:20:05,796 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:05,796 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:05,797 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T11:20:05,797 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732101605797"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732101605797"}]},"ts":"1732101605797"} 2024-11-20T11:20:05,798 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T11:20:05,799 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T11:20:05,799 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732101605799"}]},"ts":"1732101605799"} 2024-11-20T11:20:05,800 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T11:20:05,803 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=73, ppid=72, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=dda426a558461ab734d5c9192624badc, ASSIGN}] 2024-11-20T11:20:05,804 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=73, ppid=72, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=dda426a558461ab734d5c9192624badc, ASSIGN 2024-11-20T11:20:05,805 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=73, ppid=72, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=dda426a558461ab734d5c9192624badc, ASSIGN; state=OFFLINE, location=ee8338ed7cc0,35185,1732101546666; forceNewPlan=false, retain=false 2024-11-20T11:20:05,955 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=73 updating hbase:meta row=dda426a558461ab734d5c9192624badc, regionState=OPENING, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:05,956 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; OpenRegionProcedure dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666}] 2024-11-20T11:20:06,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-11-20T11:20:06,108 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:06,110 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] handler.AssignRegionHandler(135): Open 
TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:06,111 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(7285): Opening region: {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} 2024-11-20T11:20:06,111 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees dda426a558461ab734d5c9192624badc 2024-11-20T11:20:06,111 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T11:20:06,111 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(7327): checking encryption for dda426a558461ab734d5c9192624badc 2024-11-20T11:20:06,111 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(7330): checking classloading for dda426a558461ab734d5c9192624badc 2024-11-20T11:20:06,112 INFO [StoreOpener-dda426a558461ab734d5c9192624badc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region dda426a558461ab734d5c9192624badc 2024-11-20T11:20:06,114 INFO [StoreOpener-dda426a558461ab734d5c9192624badc-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T11:20:06,114 INFO [StoreOpener-dda426a558461ab734d5c9192624badc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region dda426a558461ab734d5c9192624badc columnFamilyName A 2024-11-20T11:20:06,114 DEBUG [StoreOpener-dda426a558461ab734d5c9192624badc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:20:06,114 INFO [StoreOpener-dda426a558461ab734d5c9192624badc-1 {}] regionserver.HStore(327): Store=dda426a558461ab734d5c9192624badc/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:20:06,114 INFO [StoreOpener-dda426a558461ab734d5c9192624badc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region dda426a558461ab734d5c9192624badc 2024-11-20T11:20:06,115 INFO [StoreOpener-dda426a558461ab734d5c9192624badc-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T11:20:06,116 INFO [StoreOpener-dda426a558461ab734d5c9192624badc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region dda426a558461ab734d5c9192624badc columnFamilyName B 2024-11-20T11:20:06,116 DEBUG [StoreOpener-dda426a558461ab734d5c9192624badc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:20:06,116 INFO [StoreOpener-dda426a558461ab734d5c9192624badc-1 {}] regionserver.HStore(327): Store=dda426a558461ab734d5c9192624badc/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:20:06,116 INFO [StoreOpener-dda426a558461ab734d5c9192624badc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region dda426a558461ab734d5c9192624badc 2024-11-20T11:20:06,117 INFO [StoreOpener-dda426a558461ab734d5c9192624badc-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T11:20:06,117 INFO [StoreOpener-dda426a558461ab734d5c9192624badc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region dda426a558461ab734d5c9192624badc columnFamilyName C 2024-11-20T11:20:06,117 DEBUG [StoreOpener-dda426a558461ab734d5c9192624badc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:20:06,118 INFO [StoreOpener-dda426a558461ab734d5c9192624badc-1 {}] regionserver.HStore(327): 
Store=dda426a558461ab734d5c9192624badc/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:20:06,118 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:06,118 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc 2024-11-20T11:20:06,119 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc 2024-11-20T11:20:06,120 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T11:20:06,121 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(1085): writing seq id for dda426a558461ab734d5c9192624badc 2024-11-20T11:20:06,122 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T11:20:06,123 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(1102): Opened dda426a558461ab734d5c9192624badc; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62354273, jitterRate=-0.07084892690181732}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T11:20:06,124 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(1001): Region open journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:06,124 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., pid=74, masterSystemTime=1732101606108 2024-11-20T11:20:06,125 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:06,125 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 
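The open sequence above brings each family up with a CompactingMemStore (2.00 MB in-memory flush threshold, BASIC compactor) before AssignRegionHandler reports the region opened. Once the create procedure completes, a client can confirm the table is online and read back that attribute; a minimal sketch assuming the standard Admin API, not the test's own verification code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class CheckTableOnline {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName name = TableName.valueOf("TestAcidGuarantees");
          System.out.println("available: " + admin.isTableAvailable(name));
          TableDescriptor desc = admin.getDescriptor(name);
          System.out.println("in-memory compaction: "
              + desc.getValue("hbase.hregion.compacting.memstore.type"));
          System.out.println("regions: "
              + conn.getRegionLocator(name).getAllRegionLocations().size());
        }
      }
    }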
2024-11-20T11:20:06,126 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=73 updating hbase:meta row=dda426a558461ab734d5c9192624badc, regionState=OPEN, openSeqNum=2, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:06,127 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-11-20T11:20:06,128 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; OpenRegionProcedure dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 in 171 msec 2024-11-20T11:20:06,129 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=73, resume processing ppid=72 2024-11-20T11:20:06,129 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, ppid=72, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=dda426a558461ab734d5c9192624badc, ASSIGN in 325 msec 2024-11-20T11:20:06,129 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T11:20:06,129 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732101606129"}]},"ts":"1732101606129"} 2024-11-20T11:20:06,130 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T11:20:06,133 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T11:20:06,134 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1530 sec 2024-11-20T11:20:07,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-11-20T11:20:07,086 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 72 completed 2024-11-20T11:20:07,088 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x58341641 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@17b6adc5 2024-11-20T11:20:07,091 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a569490, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:20:07,092 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:20:07,094 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51952, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:20:07,094 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T11:20:07,095 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45378, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T11:20:07,097 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x44645c55 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@669e1999 2024-11-20T11:20:07,101 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6862e3ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:20:07,102 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x64ee0130 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72aa9ee5 2024-11-20T11:20:07,104 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d296fed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:20:07,106 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x683b64c3 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4ec09297 2024-11-20T11:20:07,109 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8d0caa5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:20:07,110 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x07e55eb7 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4dfb20f6 2024-11-20T11:20:07,114 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43f04e0e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:20:07,116 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x03a703d2 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@17cf7fc0 2024-11-20T11:20:07,118 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@560ec309, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:20:07,119 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x78b04266 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5886c0f2 2024-11-20T11:20:07,123 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@eb04aeb, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:20:07,124 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x088aa519 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@66e575aa 2024-11-20T11:20:07,127 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a0e9c8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:20:07,128 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5e998dd3 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@131ceb8f 2024-11-20T11:20:07,130 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d68f787, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:20:07,131 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2e4c79b8 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5a78bf6d 2024-11-20T11:20:07,134 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10e6bf6a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:20:07,135 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2d1403c3 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@328852db 2024-11-20T11:20:07,139 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1730a60f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:20:07,144 DEBUG [hconnection-0x466ed25b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:20:07,144 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:20:07,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-11-20T11:20:07,145 DEBUG [hconnection-0x33838fc5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:20:07,145 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51964, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
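
The entries above show client connections being set up against the ZooKeeper ensemble at 127.0.0.1:62733 and the client asking the master to flush TestAcidGuarantees, which the master stores as FlushTableProcedure pid=75. A minimal sketch, assuming the standard HBase 2.x client API (the quorum address mirrors the connect strings above and is illustrative), of issuing that same flush from application code:

// Sketch only: open a connection and request a table flush; the master runs the
// FLUSH_TABLE_PREPARE / FLUSH_TABLE_FLUSH_REGIONS states while the client polls
// "Checking to see if procedure is done", as seen in the surrounding log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "62733");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
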
2024-11-20T11:20:07,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T11:20:07,146 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:20:07,146 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:20:07,147 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:20:07,147 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51968, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:20:07,152 DEBUG [hconnection-0x465bc6e0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:20:07,153 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51984, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:20:07,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on dda426a558461ab734d5c9192624badc 2024-11-20T11:20:07,156 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing dda426a558461ab734d5c9192624badc 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T11:20:07,157 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=A 2024-11-20T11:20:07,157 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:07,157 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=B 2024-11-20T11:20:07,157 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:07,157 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=C 2024-11-20T11:20:07,157 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:07,172 DEBUG [hconnection-0x32c4ef1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:20:07,173 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51996, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:20:07,181 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:07,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101667180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:07,180 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:07,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101667179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:07,184 DEBUG [hconnection-0x5b85b56e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:20:07,185 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52004, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:20:07,188 DEBUG [hconnection-0x25af8aa1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:20:07,189 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52012, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:20:07,189 DEBUG [hconnection-0x380be808-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:20:07,190 DEBUG [hconnection-0x22ce651f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:20:07,190 DEBUG [hconnection-0x39e36254-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:20:07,191 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52026, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:20:07,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:07,192 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52036, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:20:07,192 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52016, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:20:07,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52012 deadline: 1732101667191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:07,193 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:07,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52016 deadline: 1732101667193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:07,194 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:07,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52036 deadline: 1732101667193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:07,196 DEBUG [hconnection-0x54702992-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:20:07,200 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52050, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:20:07,216 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/d28025a2c19141b2b21676cbc4b4fa26 is 50, key is test_row_0/A:col10/1732101607156/Put/seqid=0 2024-11-20T11:20:07,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742043_1219 (size=12001) 2024-11-20T11:20:07,234 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/d28025a2c19141b2b21676cbc4b4fa26 2024-11-20T11:20:07,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T11:20:07,259 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/9d3af5e59ffd410aa5532c87831610a3 is 50, key is test_row_0/B:col10/1732101607156/Put/seqid=0 2024-11-20T11:20:07,273 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742044_1220 (size=12001) 2024-11-20T11:20:07,274 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/9d3af5e59ffd410aa5532c87831610a3 2024-11-20T11:20:07,283 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:07,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101667282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:07,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:07,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101667283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:07,294 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:07,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52012 deadline: 1732101667294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:07,296 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:07,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52036 deadline: 1732101667295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:07,297 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:07,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52016 deadline: 1732101667295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:07,298 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:07,299 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-20T11:20:07,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:07,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. as already flushing 2024-11-20T11:20:07,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:07,299 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
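
The repeated RegionTooBusyException warnings above come from HRegion.checkResources rejecting puts while the memstore is over its blocking limit ("Over memstore limit=512.0 K"). A minimal sketch, assuming the standard HBase 2.x client API, of the writer side of such a workload; row, family, and qualifier echo the flushed cells ("test_row_0/A:col10"), and the explicit backoff loop is purely illustrative, since the HBase client normally retries these calls internally and may surface them wrapped in a retries-exhausted exception rather than directly.

// Sketch only: a put against the busy region with explicit backoff on
// RegionTooBusyException, to make the retry behaviour visible.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (RegionTooBusyException e) {
          // Memstore above the blocking limit; wait for the flush to drain it.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}
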
2024-11-20T11:20:07,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:07,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:07,309 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/fb7b2c1f5e8d4a86a88c57fefe408742 is 50, key is test_row_0/C:col10/1732101607156/Put/seqid=0 2024-11-20T11:20:07,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742045_1221 (size=12001) 2024-11-20T11:20:07,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T11:20:07,452 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:07,452 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-20T11:20:07,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:07,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. as already flushing 2024-11-20T11:20:07,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:07,453 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
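
On where the 512.0 K blocking threshold comes from: the region blocks writes once the memstore exceeds hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, so a test can provoke this path by shrinking those settings before starting the mini cluster. A minimal sketch with illustrative values (128 KB x 4 = 512 KB, consistent with the limit in the warnings, but not read from the test source):

// Sketch only: a configuration that would yield a 512 KB memstore blocking limit.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfig {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // 128 KB flushes
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block at 4x = 512 KB
    return conf;
  }
}

With settings this small, the flush shown above (three ~20 KB store files) is triggered almost immediately and incoming mutations are bounced with RegionTooBusyException until it completes, which is exactly the pattern the surrounding entries record.
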
2024-11-20T11:20:07,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:07,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:07,486 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:07,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101667486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:07,487 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:07,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101667486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:07,497 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:07,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52012 deadline: 1732101667496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:07,499 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:07,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52036 deadline: 1732101667498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:07,499 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:07,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52016 deadline: 1732101667499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:07,605 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:07,605 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-20T11:20:07,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:07,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. as already flushing 2024-11-20T11:20:07,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:07,605 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:20:07,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:07,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:07,723 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/fb7b2c1f5e8d4a86a88c57fefe408742 2024-11-20T11:20:07,729 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/d28025a2c19141b2b21676cbc4b4fa26 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/d28025a2c19141b2b21676cbc4b4fa26 2024-11-20T11:20:07,733 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/d28025a2c19141b2b21676cbc4b4fa26, entries=150, sequenceid=13, filesize=11.7 K 2024-11-20T11:20:07,734 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/9d3af5e59ffd410aa5532c87831610a3 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/9d3af5e59ffd410aa5532c87831610a3 2024-11-20T11:20:07,738 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/9d3af5e59ffd410aa5532c87831610a3, entries=150, sequenceid=13, filesize=11.7 K 2024-11-20T11:20:07,739 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/fb7b2c1f5e8d4a86a88c57fefe408742 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/fb7b2c1f5e8d4a86a88c57fefe408742 2024-11-20T11:20:07,743 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/fb7b2c1f5e8d4a86a88c57fefe408742, entries=150, sequenceid=13, filesize=11.7 K 2024-11-20T11:20:07,744 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for dda426a558461ab734d5c9192624badc in 588ms, sequenceid=13, compaction requested=false 2024-11-20T11:20:07,744 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:07,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=75 2024-11-20T11:20:07,757 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:07,758 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-20T11:20:07,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:07,758 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing dda426a558461ab734d5c9192624badc 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T11:20:07,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=A 2024-11-20T11:20:07,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:07,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=B 2024-11-20T11:20:07,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:07,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=C 2024-11-20T11:20:07,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:07,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/c42c8f2e41274a4a94e7e0c9625790af is 50, key is test_row_0/A:col10/1732101607173/Put/seqid=0 2024-11-20T11:20:07,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742046_1222 (size=12001) 2024-11-20T11:20:07,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on dda426a558461ab734d5c9192624badc 2024-11-20T11:20:07,791 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. as already flushing 2024-11-20T11:20:07,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:07,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101667799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:07,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:07,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52012 deadline: 1732101667799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:07,802 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:07,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52036 deadline: 1732101667800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:07,802 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:07,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52016 deadline: 1732101667800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:07,802 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:07,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101667801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:07,903 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:07,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101667902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:07,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:07,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101667903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:08,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:08,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101668105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:08,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:08,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101668105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:08,180 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/c42c8f2e41274a4a94e7e0c9625790af 2024-11-20T11:20:08,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/978fbee1d0ab4745a9190f26f96a3f2f is 50, key is test_row_0/B:col10/1732101607173/Put/seqid=0 2024-11-20T11:20:08,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742047_1223 (size=12001) 2024-11-20T11:20:08,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T11:20:08,304 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:08,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52036 deadline: 1732101668303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:08,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:08,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52012 deadline: 1732101668304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:08,306 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:08,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52016 deadline: 1732101668306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:08,409 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:08,409 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:08,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101668408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:08,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101668408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:08,594 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/978fbee1d0ab4745a9190f26f96a3f2f 2024-11-20T11:20:08,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/7a5687ed3d344a04b035e24dc47c0b42 is 50, key is test_row_0/C:col10/1732101607173/Put/seqid=0 2024-11-20T11:20:08,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742048_1224 (size=12001) 2024-11-20T11:20:08,615 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/7a5687ed3d344a04b035e24dc47c0b42 2024-11-20T11:20:08,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/c42c8f2e41274a4a94e7e0c9625790af as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/c42c8f2e41274a4a94e7e0c9625790af 2024-11-20T11:20:08,624 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/c42c8f2e41274a4a94e7e0c9625790af, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T11:20:08,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/978fbee1d0ab4745a9190f26f96a3f2f as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/978fbee1d0ab4745a9190f26f96a3f2f 2024-11-20T11:20:08,629 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/978fbee1d0ab4745a9190f26f96a3f2f, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T11:20:08,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/7a5687ed3d344a04b035e24dc47c0b42 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/7a5687ed3d344a04b035e24dc47c0b42 2024-11-20T11:20:08,635 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/7a5687ed3d344a04b035e24dc47c0b42, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T11:20:08,636 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for dda426a558461ab734d5c9192624badc in 877ms, sequenceid=37, compaction requested=false 2024-11-20T11:20:08,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:08,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 
2024-11-20T11:20:08,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-11-20T11:20:08,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-11-20T11:20:08,638 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-11-20T11:20:08,638 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4900 sec 2024-11-20T11:20:08,640 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 1.4950 sec 2024-11-20T11:20:08,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on dda426a558461ab734d5c9192624badc 2024-11-20T11:20:08,913 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing dda426a558461ab734d5c9192624badc 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T11:20:08,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=A 2024-11-20T11:20:08,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:08,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=B 2024-11-20T11:20:08,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:08,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=C 2024-11-20T11:20:08,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:08,919 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/e8a18c57627b41a2bff54933fbcf6256 is 50, key is test_row_0/A:col10/1732101607798/Put/seqid=0 2024-11-20T11:20:08,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742049_1225 (size=14341) 2024-11-20T11:20:08,925 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/e8a18c57627b41a2bff54933fbcf6256 2024-11-20T11:20:08,933 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/afeaa8d071e74eee92a54250d632924b is 50, key is test_row_0/B:col10/1732101607798/Put/seqid=0 2024-11-20T11:20:08,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742050_1226 
(size=12001) 2024-11-20T11:20:08,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:08,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101668944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:08,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:08,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101668945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:09,049 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:09,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101669048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:09,049 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:09,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101669048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:09,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T11:20:09,250 INFO [Thread-1031 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-11-20T11:20:09,251 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:20:09,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:09,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101669250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:09,252 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:09,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101669250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:09,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-11-20T11:20:09,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T11:20:09,253 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:20:09,254 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:20:09,254 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:20:09,310 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:09,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52036 deadline: 1732101669309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:09,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:09,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52016 deadline: 1732101669311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:09,315 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:09,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52012 deadline: 1732101669314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:09,348 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/afeaa8d071e74eee92a54250d632924b 2024-11-20T11:20:09,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T11:20:09,357 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/71bf05eb929e478ea9397eff7407b7d9 is 50, key is test_row_0/C:col10/1732101607798/Put/seqid=0 2024-11-20T11:20:09,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742051_1227 (size=12001) 2024-11-20T11:20:09,405 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:09,406 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T11:20:09,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:09,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. as already flushing 2024-11-20T11:20:09,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 
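The repeated WARN regionserver.HRegion(5069) / DEBUG ipc.CallRunner(138) pairs above are HRegion.checkResources rejecting Mutate calls once the memstore of region dda426a558461ab734d5c9192624badc passes its blocking limit (512.0 K in this run), and the same client connections (e.g. 172.17.0.2:51964 with callIds 52, 54, 56) reissuing the writes with later deadlines, since RegionTooBusyException is retriable. The blocking limit is the configured memstore flush size multiplied by the block multiplier. A minimal sketch of the two properties involved, assuming a standalone Configuration; the 128 K flush size is illustrative only (the value actually set by this test is not visible in the log), chosen because 128 K x 4 matches the 512.0 K limit shown above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Size at which a single memstore is flushed (production default is 128 MB;
    // this test clearly runs with a tiny value -- 128 K here is an assumption).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Writes fail with RegionTooBusyException once the memstore reaches
    // flush.size * block.multiplier: 128 K * 4 = 512 K, matching "Over memstore limit=512.0 K".
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking limit = "
        + conf.getLong("hbase.hregion.memstore.flush.size", 0)
          * conf.getInt("hbase.hregion.memstore.block.multiplier", 4) + " bytes");
  }
}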
2024-11-20T11:20:09,406 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:09,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:09,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:09,553 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:09,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101669553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:09,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T11:20:09,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:09,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101669555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:09,558 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:09,559 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T11:20:09,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:09,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. as already flushing 2024-11-20T11:20:09,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:09,559 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:20:09,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:09,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:09,711 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:09,712 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T11:20:09,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:09,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. as already flushing 2024-11-20T11:20:09,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:09,712 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:09,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:09,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
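The ERROR handler.RSProcedureHandler(58) and "Remote procedure failed, pid=78" entries above come from the flush procedure chain (FlushTableProcedure pid=77 with child FlushRegionProcedure pid=78) being dispatched while the MemStoreFlusher already has a flush of the same region in flight: FlushRegionCallable finds the region "already flushing", throws IOException("Unable to complete flush ..."), and the master keeps re-dispatching the callable until the in-flight flush completes. From the caller's side this whole exchange is hidden behind one admin call, as the earlier "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed" line shows. A minimal sketch, assuming the table name from this test and a Connection created elsewhere:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public final class FlushTable {
  // Issues the same FLUSH table operation that produced procIds 75 and 77 above.
  static void flushTestTable(Connection connection) throws IOException {
    try (Admin admin = connection.getAdmin()) {
      // Waits on the FlushTableProcedure; the transient "Unable to complete flush"
      // retries seen in the log stay internal to the master/regionserver exchange.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}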
2024-11-20T11:20:09,764 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/71bf05eb929e478ea9397eff7407b7d9 2024-11-20T11:20:09,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/e8a18c57627b41a2bff54933fbcf6256 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/e8a18c57627b41a2bff54933fbcf6256 2024-11-20T11:20:09,774 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/e8a18c57627b41a2bff54933fbcf6256, entries=200, sequenceid=50, filesize=14.0 K 2024-11-20T11:20:09,775 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/afeaa8d071e74eee92a54250d632924b as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/afeaa8d071e74eee92a54250d632924b 2024-11-20T11:20:09,782 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/afeaa8d071e74eee92a54250d632924b, entries=150, sequenceid=50, filesize=11.7 K 2024-11-20T11:20:09,783 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/71bf05eb929e478ea9397eff7407b7d9 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/71bf05eb929e478ea9397eff7407b7d9 2024-11-20T11:20:09,787 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/71bf05eb929e478ea9397eff7407b7d9, entries=150, sequenceid=50, filesize=11.7 K 2024-11-20T11:20:09,787 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for dda426a558461ab734d5c9192624badc in 874ms, sequenceid=50, compaction requested=true 2024-11-20T11:20:09,788 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:09,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dda426a558461ab734d5c9192624badc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:20:09,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:09,788 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:09,788 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:09,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dda426a558461ab734d5c9192624badc:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:20:09,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:09,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dda426a558461ab734d5c9192624badc:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:20:09,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:20:09,789 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:09,789 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): dda426a558461ab734d5c9192624badc/A is initiating minor compaction (all files) 2024-11-20T11:20:09,789 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dda426a558461ab734d5c9192624badc/A in TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:09,789 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/d28025a2c19141b2b21676cbc4b4fa26, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/c42c8f2e41274a4a94e7e0c9625790af, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/e8a18c57627b41a2bff54933fbcf6256] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp, totalSize=37.4 K 2024-11-20T11:20:09,789 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:09,789 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): dda426a558461ab734d5c9192624badc/B is initiating minor compaction (all files) 2024-11-20T11:20:09,789 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dda426a558461ab734d5c9192624badc/B in TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 
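With this flush committed, stores A, B and C each hold three HFiles, which is exactly what the compaction policy needs: the SortedCompactionPolicy / ExploringCompactionPolicy lines above select "3 files ... 3 eligible, 16 blocking" for a minor compaction of all files in the store. The thresholds behind that decision are ordinary configuration; a minimal sketch of the relevant keys, with the stock HBase defaults rather than anything read from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionKnobs {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // A store becomes eligible for minor compaction once it holds at least this many HFiles.
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Upper bound on how many files a single minor compaction may select.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Above this many store files, flushes are delayed until compaction catches up
    // -- the "16 blocking" reported by SortedCompactionPolicy above.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("min=" + conf.getInt("hbase.hstore.compaction.min", 3)
        + " max=" + conf.getInt("hbase.hstore.compaction.max", 10)
        + " blocking=" + conf.getInt("hbase.hstore.blockingStoreFiles", 16));
  }
}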
2024-11-20T11:20:09,789 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/9d3af5e59ffd410aa5532c87831610a3, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/978fbee1d0ab4745a9190f26f96a3f2f, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/afeaa8d071e74eee92a54250d632924b] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp, totalSize=35.2 K 2024-11-20T11:20:09,790 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting d28025a2c19141b2b21676cbc4b4fa26, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732101607152 2024-11-20T11:20:09,790 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 9d3af5e59ffd410aa5532c87831610a3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732101607152 2024-11-20T11:20:09,790 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting c42c8f2e41274a4a94e7e0c9625790af, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732101607173 2024-11-20T11:20:09,790 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 978fbee1d0ab4745a9190f26f96a3f2f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732101607173 2024-11-20T11:20:09,790 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting e8a18c57627b41a2bff54933fbcf6256, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732101607795 2024-11-20T11:20:09,791 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting afeaa8d071e74eee92a54250d632924b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732101607795 2024-11-20T11:20:09,797 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dda426a558461ab734d5c9192624badc#A#compaction#186 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:09,798 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/ec8d6cf14b414410bfd4a4af18e9c265 is 50, key is test_row_0/A:col10/1732101607798/Put/seqid=0 2024-11-20T11:20:09,799 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): dda426a558461ab734d5c9192624badc#B#compaction#187 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:09,800 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/a165d42d366b487b9dbde2c100d8b0a8 is 50, key is test_row_0/B:col10/1732101607798/Put/seqid=0 2024-11-20T11:20:09,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742052_1228 (size=12104) 2024-11-20T11:20:09,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742053_1229 (size=12104) 2024-11-20T11:20:09,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T11:20:09,864 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:09,864 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T11:20:09,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:09,865 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing dda426a558461ab734d5c9192624badc 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T11:20:09,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=A 2024-11-20T11:20:09,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:09,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=B 2024-11-20T11:20:09,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:09,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=C 2024-11-20T11:20:09,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:09,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/379bd1722e914f21bc26ad4f07834394 is 50, key is test_row_0/A:col10/1732101608943/Put/seqid=0 
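The HRegion(2837) and CompactingMemStore(205) lines above show the procedure-driven flush finally starting: all 3/3 column families (A, B, C) are flushed, and each store is backed by a CompactingMemStore, i.e. the table was created with in-memory compaction enabled. A minimal sketch of how such a table could be declared, assuming the three family names visible in this log; the BASIC policy is an assumption, since the MemoryCompactionPolicy actually configured by TestAcidGuarantees is not visible here:

import java.io.IOException;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateAcidTable {
  static void createTable(Admin admin) throws IOException {
    TableDescriptorBuilder table =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
    for (String family : new String[] { "A", "B", "C" }) {
      table.setColumnFamily(ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes(family))
          // "CompactingMemStore" in the log implies an in-memory compaction policy other than NONE.
          .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
          .build());
    }
    admin.createTable(table.build());
  }
}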
2024-11-20T11:20:09,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742054_1230 (size=12001) 2024-11-20T11:20:10,057 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. as already flushing 2024-11-20T11:20:10,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on dda426a558461ab734d5c9192624badc 2024-11-20T11:20:10,072 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:10,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101670069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:10,073 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:10,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101670071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:10,174 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:10,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101670173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:10,176 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:10,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101670174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:10,209 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/ec8d6cf14b414410bfd4a4af18e9c265 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/ec8d6cf14b414410bfd4a4af18e9c265 2024-11-20T11:20:10,213 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dda426a558461ab734d5c9192624badc/A of dda426a558461ab734d5c9192624badc into ec8d6cf14b414410bfd4a4af18e9c265(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:20:10,214 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:10,214 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., storeName=dda426a558461ab734d5c9192624badc/A, priority=13, startTime=1732101609788; duration=0sec 2024-11-20T11:20:10,214 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:20:10,214 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dda426a558461ab734d5c9192624badc:A 2024-11-20T11:20:10,214 DEBUG [master/ee8338ed7cc0:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-20T11:20:10,214 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:10,216 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:10,216 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): dda426a558461ab734d5c9192624badc/C is initiating minor compaction (all files) 2024-11-20T11:20:10,217 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dda426a558461ab734d5c9192624badc/C in TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 
2024-11-20T11:20:10,217 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/fb7b2c1f5e8d4a86a88c57fefe408742, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/7a5687ed3d344a04b035e24dc47c0b42, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/71bf05eb929e478ea9397eff7407b7d9] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp, totalSize=35.2 K 2024-11-20T11:20:10,217 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting fb7b2c1f5e8d4a86a88c57fefe408742, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732101607152 2024-11-20T11:20:10,217 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7a5687ed3d344a04b035e24dc47c0b42, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732101607173 2024-11-20T11:20:10,218 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 71bf05eb929e478ea9397eff7407b7d9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732101607795 2024-11-20T11:20:10,219 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/a165d42d366b487b9dbde2c100d8b0a8 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/a165d42d366b487b9dbde2c100d8b0a8 2024-11-20T11:20:10,219 DEBUG [master/ee8338ed7cc0:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 2931c42a5e0431c7e1d9a63f9b78ad4e changed from -1.0 to 0.0, refreshing cache 2024-11-20T11:20:10,226 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dda426a558461ab734d5c9192624badc/B of dda426a558461ab734d5c9192624badc into a165d42d366b487b9dbde2c100d8b0a8(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
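The compaction entries above show the ExploringCompactionPolicy folding three ~11.7 K store files per column family into a single ~11.8 K file, driven automatically by the region server's CompactSplit threads. As a hedged illustration only (the connection bootstrap, class name, and the idea of requesting this by hand are assumptions, not something this log shows), a comparable compaction of the same C store could be asked for through the public Admin API:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative sketch: request a compaction of the store the log is already compacting.
public class RequestStoreCompaction {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Table and family names are taken from the log; everything else here is assumed.
      admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("C"));
    }
  }
}

In the log the selections fire on their own once three eligible files accumulate in a store; the explicit request above is only one way to reproduce the same store-level compaction on demand.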
2024-11-20T11:20:10,226 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:10,226 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., storeName=dda426a558461ab734d5c9192624badc/B, priority=13, startTime=1732101609788; duration=0sec 2024-11-20T11:20:10,226 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:10,226 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dda426a558461ab734d5c9192624badc:B 2024-11-20T11:20:10,227 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dda426a558461ab734d5c9192624badc#C#compaction#189 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:10,227 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/7a13cbfd0ae247748ba1c71b24e8ccc6 is 50, key is test_row_0/C:col10/1732101607798/Put/seqid=0 2024-11-20T11:20:10,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742055_1231 (size=12104) 2024-11-20T11:20:10,274 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/379bd1722e914f21bc26ad4f07834394 2024-11-20T11:20:10,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/15a98fbc95d24b259ffb1dff34ad7b53 is 50, key is test_row_0/B:col10/1732101608943/Put/seqid=0 2024-11-20T11:20:10,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742056_1232 (size=12001) 2024-11-20T11:20:10,301 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T11:20:10,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T11:20:10,378 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:10,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101670377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:10,378 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:10,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101670377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:10,650 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/7a13cbfd0ae247748ba1c71b24e8ccc6 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/7a13cbfd0ae247748ba1c71b24e8ccc6 2024-11-20T11:20:10,656 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dda426a558461ab734d5c9192624badc/C of dda426a558461ab734d5c9192624badc into 7a13cbfd0ae247748ba1c71b24e8ccc6(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:20:10,656 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:10,656 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., storeName=dda426a558461ab734d5c9192624badc/C, priority=13, startTime=1732101609788; duration=0sec 2024-11-20T11:20:10,656 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:10,656 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dda426a558461ab734d5c9192624badc:C 2024-11-20T11:20:10,680 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:10,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101670679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:10,680 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:10,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101670680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:10,686 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/15a98fbc95d24b259ffb1dff34ad7b53 2024-11-20T11:20:10,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/8573b60d6ee14157afd0841698befcf1 is 50, key is test_row_0/C:col10/1732101608943/Put/seqid=0 2024-11-20T11:20:10,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742057_1233 (size=12001) 2024-11-20T11:20:11,100 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/8573b60d6ee14157afd0841698befcf1 2024-11-20T11:20:11,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/379bd1722e914f21bc26ad4f07834394 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/379bd1722e914f21bc26ad4f07834394 2024-11-20T11:20:11,109 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/379bd1722e914f21bc26ad4f07834394, entries=150, sequenceid=73, filesize=11.7 K 2024-11-20T11:20:11,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/15a98fbc95d24b259ffb1dff34ad7b53 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/15a98fbc95d24b259ffb1dff34ad7b53 2024-11-20T11:20:11,114 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/15a98fbc95d24b259ffb1dff34ad7b53, entries=150, sequenceid=73, filesize=11.7 K 2024-11-20T11:20:11,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/8573b60d6ee14157afd0841698befcf1 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/8573b60d6ee14157afd0841698befcf1 2024-11-20T11:20:11,119 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/8573b60d6ee14157afd0841698befcf1, entries=150, sequenceid=73, filesize=11.7 K 2024-11-20T11:20:11,120 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for dda426a558461ab734d5c9192624badc in 1254ms, sequenceid=73, compaction requested=false 2024-11-20T11:20:11,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:11,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 
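The repeated "RegionTooBusyException: Over memstore limit=512.0 K" warnings in this log come from HRegion.checkResources, which rejects writes once a region's memstore exceeds the configured flush size multiplied by the blocking multiplier. A minimal sketch of that relationship follows, assuming the default multiplier of 4 and a deliberately small 128 KB flush size; the concrete values this test uses are an assumption inferred from the 512.0 K limit above, not read from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative sketch of how the 512.0 K blocking limit can arise.
public class MemstoreLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed values: a 128 KB per-region flush size with the default blocking
    // multiplier of 4 yields the 512.0 K "Over memstore limit" seen in the log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // Writes are rejected with RegionTooBusyException once the memstore passes this size.
    long blockingLimit = flushSize * multiplier;
    System.out.println("Blocking memstore limit = " + blockingLimit + " bytes"); // 524288 = 512 K
  }
}

Once the flush recorded just above completes (~134.18 KB written across A/B/C at sequenceid=73), the memstore drops back under that limit and the rejected mutations start succeeding again.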
2024-11-20T11:20:11,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-11-20T11:20:11,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-11-20T11:20:11,122 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-11-20T11:20:11,122 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8670 sec 2024-11-20T11:20:11,124 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 1.8720 sec 2024-11-20T11:20:11,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on dda426a558461ab734d5c9192624badc 2024-11-20T11:20:11,183 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing dda426a558461ab734d5c9192624badc 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T11:20:11,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=A 2024-11-20T11:20:11,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:11,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=B 2024-11-20T11:20:11,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:11,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=C 2024-11-20T11:20:11,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:11,195 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/9e653b16aa964165bde8f0de70abf85d is 50, key is test_row_0/A:col10/1732101611182/Put/seqid=0 2024-11-20T11:20:11,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742058_1234 (size=12001) 2024-11-20T11:20:11,214 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:11,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101671212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:11,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:11,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101671213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:11,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:11,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101671316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:11,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:11,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101671316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:11,321 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:11,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52012 deadline: 1732101671321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:11,322 DEBUG [Thread-1025 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4179 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., hostname=ee8338ed7cc0,35185,1732101546666, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T11:20:11,323 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:11,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52036 deadline: 1732101671323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:11,324 DEBUG [Thread-1027 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4181 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., hostname=ee8338ed7cc0,35185,1732101546666, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T11:20:11,325 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:11,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52016 deadline: 1732101671325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:11,326 DEBUG [Thread-1021 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4182 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at 
region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., hostname=ee8338ed7cc0,35185,1732101546666, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T11:20:11,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T11:20:11,357 INFO [Thread-1031 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-11-20T11:20:11,358 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:20:11,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-11-20T11:20:11,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T11:20:11,360 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:20:11,361 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:20:11,361 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:20:11,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T11:20:11,512 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:11,513 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T11:20:11,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:11,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 
as already flushing 2024-11-20T11:20:11,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:11,513 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:11,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:11,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:11,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:11,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101671517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:11,520 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:11,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101671519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:11,600 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/9e653b16aa964165bde8f0de70abf85d 2024-11-20T11:20:11,608 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/e164f8085a864cabaaf61b4518cab1b9 is 50, key is test_row_0/B:col10/1732101611182/Put/seqid=0 2024-11-20T11:20:11,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742059_1235 (size=12001) 2024-11-20T11:20:11,618 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/e164f8085a864cabaaf61b4518cab1b9 2024-11-20T11:20:11,625 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/323f5812d6234586aef4d676acdc7eb0 is 50, key is test_row_0/C:col10/1732101611182/Put/seqid=0 2024-11-20T11:20:11,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742060_1236 (size=12001) 2024-11-20T11:20:11,636 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/323f5812d6234586aef4d676acdc7eb0 2024-11-20T11:20:11,641 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/9e653b16aa964165bde8f0de70abf85d as 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/9e653b16aa964165bde8f0de70abf85d 2024-11-20T11:20:11,646 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/9e653b16aa964165bde8f0de70abf85d, entries=150, sequenceid=91, filesize=11.7 K 2024-11-20T11:20:11,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/e164f8085a864cabaaf61b4518cab1b9 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/e164f8085a864cabaaf61b4518cab1b9 2024-11-20T11:20:11,652 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/e164f8085a864cabaaf61b4518cab1b9, entries=150, sequenceid=91, filesize=11.7 K 2024-11-20T11:20:11,653 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/323f5812d6234586aef4d676acdc7eb0 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/323f5812d6234586aef4d676acdc7eb0 2024-11-20T11:20:11,658 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/323f5812d6234586aef4d676acdc7eb0, entries=150, sequenceid=91, filesize=11.7 K 2024-11-20T11:20:11,659 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for dda426a558461ab734d5c9192624badc in 476ms, sequenceid=91, compaction requested=true 2024-11-20T11:20:11,659 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:11,659 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dda426a558461ab734d5c9192624badc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:20:11,659 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:11,659 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:11,659 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:11,659 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dda426a558461ab734d5c9192624badc:B, priority=-2147483648, current under compaction store 
size is 2 2024-11-20T11:20:11,659 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:11,659 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dda426a558461ab734d5c9192624badc:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:20:11,659 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:20:11,661 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:11,661 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:11,661 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): dda426a558461ab734d5c9192624badc/A is initiating minor compaction (all files) 2024-11-20T11:20:11,661 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): dda426a558461ab734d5c9192624badc/B is initiating minor compaction (all files) 2024-11-20T11:20:11,661 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dda426a558461ab734d5c9192624badc/B in TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:11,661 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dda426a558461ab734d5c9192624badc/A in TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 
2024-11-20T11:20:11,661 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/a165d42d366b487b9dbde2c100d8b0a8, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/15a98fbc95d24b259ffb1dff34ad7b53, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/e164f8085a864cabaaf61b4518cab1b9] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp, totalSize=35.3 K 2024-11-20T11:20:11,661 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/ec8d6cf14b414410bfd4a4af18e9c265, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/379bd1722e914f21bc26ad4f07834394, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/9e653b16aa964165bde8f0de70abf85d] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp, totalSize=35.3 K 2024-11-20T11:20:11,662 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting a165d42d366b487b9dbde2c100d8b0a8, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732101607795 2024-11-20T11:20:11,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T11:20:11,662 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting ec8d6cf14b414410bfd4a4af18e9c265, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732101607795 2024-11-20T11:20:11,663 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 15a98fbc95d24b259ffb1dff34ad7b53, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1732101608940 2024-11-20T11:20:11,663 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 379bd1722e914f21bc26ad4f07834394, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1732101608940 2024-11-20T11:20:11,663 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting e164f8085a864cabaaf61b4518cab1b9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732101610066 2024-11-20T11:20:11,663 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e653b16aa964165bde8f0de70abf85d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732101610066 2024-11-20T11:20:11,665 DEBUG [RSProcedureDispatcher-pool-1 {}] 
master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:11,666 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T11:20:11,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:11,666 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing dda426a558461ab734d5c9192624badc 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T11:20:11,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=A 2024-11-20T11:20:11,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:11,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=B 2024-11-20T11:20:11,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:11,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=C 2024-11-20T11:20:11,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:11,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/cb2450f3272b4eb9a4c4148fe801d1a5 is 50, key is test_row_0/A:col10/1732101611210/Put/seqid=0 2024-11-20T11:20:11,690 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): dda426a558461ab734d5c9192624badc#B#compaction#196 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:11,691 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/2883d683efcd490ebd2093ede81847d6 is 50, key is test_row_0/B:col10/1732101611182/Put/seqid=0 2024-11-20T11:20:11,694 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dda426a558461ab734d5c9192624badc#A#compaction#197 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:11,694 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/ae53f675783d4cdf884465df4f2c7326 is 50, key is test_row_0/A:col10/1732101611182/Put/seqid=0 2024-11-20T11:20:11,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742061_1237 (size=12001) 2024-11-20T11:20:11,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742062_1238 (size=12207) 2024-11-20T11:20:11,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742063_1239 (size=12207) 2024-11-20T11:20:11,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on dda426a558461ab734d5c9192624badc 2024-11-20T11:20:11,821 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. as already flushing 2024-11-20T11:20:11,841 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:11,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101671838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:11,841 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:11,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101671838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:11,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:11,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101671942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:11,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:11,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101671942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:11,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T11:20:12,110 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=112 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/cb2450f3272b4eb9a4c4148fe801d1a5 2024-11-20T11:20:12,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/261f2e7be5d44592925a4547183d9c4b is 50, key is test_row_0/B:col10/1732101611210/Put/seqid=0 2024-11-20T11:20:12,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742064_1240 (size=12001) 2024-11-20T11:20:12,124 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/2883d683efcd490ebd2093ede81847d6 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/2883d683efcd490ebd2093ede81847d6 2024-11-20T11:20:12,128 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/ae53f675783d4cdf884465df4f2c7326 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/ae53f675783d4cdf884465df4f2c7326 2024-11-20T11:20:12,133 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dda426a558461ab734d5c9192624badc/B of dda426a558461ab734d5c9192624badc into 2883d683efcd490ebd2093ede81847d6(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:20:12,133 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:12,133 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., storeName=dda426a558461ab734d5c9192624badc/B, priority=13, startTime=1732101611659; duration=0sec 2024-11-20T11:20:12,133 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:20:12,133 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dda426a558461ab734d5c9192624badc:B 2024-11-20T11:20:12,133 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:12,133 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dda426a558461ab734d5c9192624badc/A of dda426a558461ab734d5c9192624badc into ae53f675783d4cdf884465df4f2c7326(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:20:12,133 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:12,134 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., storeName=dda426a558461ab734d5c9192624badc/A, priority=13, startTime=1732101611659; duration=0sec 2024-11-20T11:20:12,134 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:12,134 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dda426a558461ab734d5c9192624badc:A 2024-11-20T11:20:12,134 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:12,134 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): dda426a558461ab734d5c9192624badc/C is initiating minor compaction (all files) 2024-11-20T11:20:12,135 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dda426a558461ab734d5c9192624badc/C in TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 
2024-11-20T11:20:12,135 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/7a13cbfd0ae247748ba1c71b24e8ccc6, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/8573b60d6ee14157afd0841698befcf1, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/323f5812d6234586aef4d676acdc7eb0] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp, totalSize=35.3 K 2024-11-20T11:20:12,135 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 7a13cbfd0ae247748ba1c71b24e8ccc6, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732101607795 2024-11-20T11:20:12,135 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 8573b60d6ee14157afd0841698befcf1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1732101608940 2024-11-20T11:20:12,136 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 323f5812d6234586aef4d676acdc7eb0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732101610066 2024-11-20T11:20:12,146 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:12,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101672145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:12,146 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:12,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101672145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:12,158 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): dda426a558461ab734d5c9192624badc#C#compaction#199 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:12,159 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/2e799bbca71d4401ab7f67a48c48e798 is 50, key is test_row_0/C:col10/1732101611182/Put/seqid=0 2024-11-20T11:20:12,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742065_1241 (size=12207) 2024-11-20T11:20:12,169 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/2e799bbca71d4401ab7f67a48c48e798 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/2e799bbca71d4401ab7f67a48c48e798 2024-11-20T11:20:12,174 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dda426a558461ab734d5c9192624badc/C of dda426a558461ab734d5c9192624badc into 2e799bbca71d4401ab7f67a48c48e798(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:20:12,174 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:12,174 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., storeName=dda426a558461ab734d5c9192624badc/C, priority=13, startTime=1732101611659; duration=0sec 2024-11-20T11:20:12,174 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:12,174 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dda426a558461ab734d5c9192624badc:C 2024-11-20T11:20:12,448 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:12,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101672447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:12,449 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:12,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101672448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:12,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T11:20:12,524 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=112 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/261f2e7be5d44592925a4547183d9c4b 2024-11-20T11:20:12,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/36925946384d45b9999b183fb573f358 is 50, key is test_row_0/C:col10/1732101611210/Put/seqid=0 2024-11-20T11:20:12,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742066_1242 (size=12001) 2024-11-20T11:20:12,939 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=112 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/36925946384d45b9999b183fb573f358 2024-11-20T11:20:12,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/cb2450f3272b4eb9a4c4148fe801d1a5 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/cb2450f3272b4eb9a4c4148fe801d1a5 2024-11-20T11:20:12,951 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/cb2450f3272b4eb9a4c4148fe801d1a5, entries=150, sequenceid=112, filesize=11.7 K 2024-11-20T11:20:12,952 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:12,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101672951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:12,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/261f2e7be5d44592925a4547183d9c4b as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/261f2e7be5d44592925a4547183d9c4b 2024-11-20T11:20:12,954 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:12,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101672953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:12,958 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/261f2e7be5d44592925a4547183d9c4b, entries=150, sequenceid=112, filesize=11.7 K 2024-11-20T11:20:12,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/36925946384d45b9999b183fb573f358 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/36925946384d45b9999b183fb573f358 2024-11-20T11:20:12,963 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/36925946384d45b9999b183fb573f358, entries=150, sequenceid=112, filesize=11.7 K 2024-11-20T11:20:12,964 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for dda426a558461ab734d5c9192624badc in 1298ms, sequenceid=112, compaction requested=false 2024-11-20T11:20:12,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:12,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 
2024-11-20T11:20:12,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-20T11:20:12,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-11-20T11:20:12,967 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-11-20T11:20:12,967 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6050 sec 2024-11-20T11:20:12,968 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.6090 sec 2024-11-20T11:20:13,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T11:20:13,465 INFO [Thread-1031 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-11-20T11:20:13,466 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:20:13,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-11-20T11:20:13,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T11:20:13,468 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:20:13,469 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:20:13,469 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:20:13,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T11:20:13,620 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:13,621 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T11:20:13,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 
2024-11-20T11:20:13,621 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing dda426a558461ab734d5c9192624badc 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T11:20:13,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=A 2024-11-20T11:20:13,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:13,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=B 2024-11-20T11:20:13,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:13,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=C 2024-11-20T11:20:13,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:13,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/b352297cb2724dda8c3afea87e727122 is 50, key is test_row_0/A:col10/1732101611837/Put/seqid=0 2024-11-20T11:20:13,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742067_1243 (size=12001) 2024-11-20T11:20:13,632 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/b352297cb2724dda8c3afea87e727122 2024-11-20T11:20:13,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/fd1c7d0e58af45788548b34111989416 is 50, key is test_row_0/B:col10/1732101611837/Put/seqid=0 2024-11-20T11:20:13,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742068_1244 (size=12001) 2024-11-20T11:20:13,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T11:20:13,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on dda426a558461ab734d5c9192624badc 2024-11-20T11:20:13,957 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. as already flushing 2024-11-20T11:20:13,980 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:13,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101673978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:13,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:13,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101673979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:14,045 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/fd1c7d0e58af45788548b34111989416 2024-11-20T11:20:14,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/e22551da4a7c467693bf45832f7feb57 is 50, key is test_row_0/C:col10/1732101611837/Put/seqid=0 2024-11-20T11:20:14,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742069_1245 (size=12001) 2024-11-20T11:20:14,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T11:20:14,083 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:14,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101674081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:14,083 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:14,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101674081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:14,285 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:14,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101674284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:14,286 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:14,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101674284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:14,466 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/e22551da4a7c467693bf45832f7feb57 2024-11-20T11:20:14,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/b352297cb2724dda8c3afea87e727122 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/b352297cb2724dda8c3afea87e727122 2024-11-20T11:20:14,477 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/b352297cb2724dda8c3afea87e727122, entries=150, sequenceid=130, filesize=11.7 K 2024-11-20T11:20:14,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/fd1c7d0e58af45788548b34111989416 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/fd1c7d0e58af45788548b34111989416 2024-11-20T11:20:14,484 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/fd1c7d0e58af45788548b34111989416, entries=150, sequenceid=130, filesize=11.7 K 2024-11-20T11:20:14,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/e22551da4a7c467693bf45832f7feb57 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/e22551da4a7c467693bf45832f7feb57 2024-11-20T11:20:14,489 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/e22551da4a7c467693bf45832f7feb57, entries=150, sequenceid=130, filesize=11.7 K 2024-11-20T11:20:14,490 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for dda426a558461ab734d5c9192624badc in 869ms, sequenceid=130, compaction requested=true 2024-11-20T11:20:14,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:14,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:14,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-20T11:20:14,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-11-20T11:20:14,493 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-11-20T11:20:14,493 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0220 sec 2024-11-20T11:20:14,494 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 1.0270 sec 2024-11-20T11:20:14,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T11:20:14,571 INFO [Thread-1031 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-11-20T11:20:14,572 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:20:14,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-11-20T11:20:14,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T11:20:14,574 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:20:14,574 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:20:14,575 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:20:14,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on dda426a558461ab734d5c9192624badc 2024-11-20T11:20:14,589 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing dda426a558461ab734d5c9192624badc 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T11:20:14,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=A 2024-11-20T11:20:14,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:14,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=B 2024-11-20T11:20:14,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:14,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=C 2024-11-20T11:20:14,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:14,595 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/e0a507ebb7b3431ca64894ddc230bd43 is 50, key is test_row_0/A:col10/1732101613978/Put/seqid=0 2024-11-20T11:20:14,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742070_1246 (size=12151) 2024-11-20T11:20:14,609 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:14,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:14,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101674607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:14,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101674608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:14,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T11:20:14,712 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:14,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101674711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:14,712 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:14,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101674711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:14,726 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:14,727 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T11:20:14,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:14,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. as already flushing 2024-11-20T11:20:14,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:14,727 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:20:14,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:14,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:14,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T11:20:14,879 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:14,879 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T11:20:14,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:14,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. as already flushing 2024-11-20T11:20:14,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:14,880 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:14,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:14,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:14,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:14,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101674913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:14,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:14,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101674914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:15,007 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/e0a507ebb7b3431ca64894ddc230bd43 2024-11-20T11:20:15,016 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/87899576ba224943b691c92209ff6016 is 50, key is test_row_0/B:col10/1732101613978/Put/seqid=0 2024-11-20T11:20:15,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742071_1247 (size=12151) 2024-11-20T11:20:15,032 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:15,033 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T11:20:15,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:15,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. as already flushing 2024-11-20T11:20:15,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:15,033 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:15,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:15,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:15,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T11:20:15,185 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:15,186 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T11:20:15,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:15,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. as already flushing 2024-11-20T11:20:15,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:15,186 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:15,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:15,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:15,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:15,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101675216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:15,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:15,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101675216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:15,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:15,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52036 deadline: 1732101675337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:15,338 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:15,338 DEBUG [Thread-1027 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8195 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., hostname=ee8338ed7cc0,35185,1732101546666, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at 
org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T11:20:15,338 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T11:20:15,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:15,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. as already flushing 2024-11-20T11:20:15,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:15,339 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:15,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:20:15,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:15,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:15,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52012 deadline: 1732101675346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:15,347 DEBUG [Thread-1025 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8204 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., hostname=ee8338ed7cc0,35185,1732101546666, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T11:20:15,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:15,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52016 deadline: 1732101675348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:15,349 DEBUG [Thread-1021 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8205 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., hostname=ee8338ed7cc0,35185,1732101546666, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T11:20:15,421 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=152 (bloomFilter=true), 
to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/87899576ba224943b691c92209ff6016 2024-11-20T11:20:15,432 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/08bd96ef799642b59707353774191f68 is 50, key is test_row_0/C:col10/1732101613978/Put/seqid=0 2024-11-20T11:20:15,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742072_1248 (size=12151) 2024-11-20T11:20:15,491 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:15,491 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T11:20:15,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:15,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. as already flushing 2024-11-20T11:20:15,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:15,492 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:15,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:15,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:20:15,644 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:15,645 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T11:20:15,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:15,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. as already flushing 2024-11-20T11:20:15,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:15,645 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:15,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:20:15,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:15,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T11:20:15,720 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:15,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101675719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:15,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:15,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101675722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:15,797 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:15,798 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T11:20:15,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:15,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. as already flushing 2024-11-20T11:20:15,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:15,798 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:20:15,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:15,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:15,836 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/08bd96ef799642b59707353774191f68 2024-11-20T11:20:15,842 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/e0a507ebb7b3431ca64894ddc230bd43 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/e0a507ebb7b3431ca64894ddc230bd43 2024-11-20T11:20:15,847 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/e0a507ebb7b3431ca64894ddc230bd43, entries=150, sequenceid=152, filesize=11.9 K 2024-11-20T11:20:15,847 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/87899576ba224943b691c92209ff6016 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/87899576ba224943b691c92209ff6016 2024-11-20T11:20:15,852 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/87899576ba224943b691c92209ff6016, entries=150, sequenceid=152, filesize=11.9 K 2024-11-20T11:20:15,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/08bd96ef799642b59707353774191f68 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/08bd96ef799642b59707353774191f68 2024-11-20T11:20:15,857 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/08bd96ef799642b59707353774191f68, entries=150, sequenceid=152, filesize=11.9 K 2024-11-20T11:20:15,858 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for dda426a558461ab734d5c9192624badc in 1269ms, sequenceid=152, compaction requested=true 2024-11-20T11:20:15,858 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:15,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
dda426a558461ab734d5c9192624badc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:20:15,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:15,858 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T11:20:15,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dda426a558461ab734d5c9192624badc:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:20:15,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:15,858 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T11:20:15,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dda426a558461ab734d5c9192624badc:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:20:15,859 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:20:15,860 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48360 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T11:20:15,860 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48360 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T11:20:15,860 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): dda426a558461ab734d5c9192624badc/B is initiating minor compaction (all files) 2024-11-20T11:20:15,860 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): dda426a558461ab734d5c9192624badc/A is initiating minor compaction (all files) 2024-11-20T11:20:15,860 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dda426a558461ab734d5c9192624badc/B in TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:15,860 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dda426a558461ab734d5c9192624badc/A in TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 
2024-11-20T11:20:15,860 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/2883d683efcd490ebd2093ede81847d6, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/261f2e7be5d44592925a4547183d9c4b, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/fd1c7d0e58af45788548b34111989416, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/87899576ba224943b691c92209ff6016] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp, totalSize=47.2 K 2024-11-20T11:20:15,860 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/ae53f675783d4cdf884465df4f2c7326, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/cb2450f3272b4eb9a4c4148fe801d1a5, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/b352297cb2724dda8c3afea87e727122, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/e0a507ebb7b3431ca64894ddc230bd43] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp, totalSize=47.2 K 2024-11-20T11:20:15,861 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 2883d683efcd490ebd2093ede81847d6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732101610066 2024-11-20T11:20:15,861 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae53f675783d4cdf884465df4f2c7326, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732101610066 2024-11-20T11:20:15,861 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb2450f3272b4eb9a4c4148fe801d1a5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=112, earliestPutTs=1732101611210 2024-11-20T11:20:15,861 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 261f2e7be5d44592925a4547183d9c4b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=112, earliestPutTs=1732101611210 2024-11-20T11:20:15,861 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting b352297cb2724dda8c3afea87e727122, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732101611823 2024-11-20T11:20:15,861 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 
fd1c7d0e58af45788548b34111989416, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732101611823 2024-11-20T11:20:15,862 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting e0a507ebb7b3431ca64894ddc230bd43, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732101613977 2024-11-20T11:20:15,862 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 87899576ba224943b691c92209ff6016, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732101613977 2024-11-20T11:20:15,871 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dda426a558461ab734d5c9192624badc#A#compaction#208 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:15,871 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/3efe9a51b7e64d798ed0d9fcf5da00f6 is 50, key is test_row_0/A:col10/1732101613978/Put/seqid=0 2024-11-20T11:20:15,871 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): dda426a558461ab734d5c9192624badc#B#compaction#207 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:15,872 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/6ebfd336f3de40eaa1ea0725da374f0e is 50, key is test_row_0/B:col10/1732101613978/Put/seqid=0 2024-11-20T11:20:15,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742074_1250 (size=12493) 2024-11-20T11:20:15,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742073_1249 (size=12493) 2024-11-20T11:20:15,894 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/6ebfd336f3de40eaa1ea0725da374f0e as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/6ebfd336f3de40eaa1ea0725da374f0e 2024-11-20T11:20:15,895 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/3efe9a51b7e64d798ed0d9fcf5da00f6 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/3efe9a51b7e64d798ed0d9fcf5da00f6 2024-11-20T11:20:15,900 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed 
compaction of 4 (all) file(s) in dda426a558461ab734d5c9192624badc/B of dda426a558461ab734d5c9192624badc into 6ebfd336f3de40eaa1ea0725da374f0e(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:20:15,900 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:15,900 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., storeName=dda426a558461ab734d5c9192624badc/B, priority=12, startTime=1732101615858; duration=0sec 2024-11-20T11:20:15,900 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:20:15,900 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dda426a558461ab734d5c9192624badc:B 2024-11-20T11:20:15,901 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T11:20:15,901 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in dda426a558461ab734d5c9192624badc/A of dda426a558461ab734d5c9192624badc into 3efe9a51b7e64d798ed0d9fcf5da00f6(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:20:15,901 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:15,901 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., storeName=dda426a558461ab734d5c9192624badc/A, priority=12, startTime=1732101615858; duration=0sec 2024-11-20T11:20:15,902 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:15,902 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dda426a558461ab734d5c9192624badc:A 2024-11-20T11:20:15,902 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48360 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T11:20:15,902 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): dda426a558461ab734d5c9192624badc/C is initiating minor compaction (all files) 2024-11-20T11:20:15,902 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dda426a558461ab734d5c9192624badc/C in TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 
2024-11-20T11:20:15,902 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/2e799bbca71d4401ab7f67a48c48e798, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/36925946384d45b9999b183fb573f358, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/e22551da4a7c467693bf45832f7feb57, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/08bd96ef799642b59707353774191f68] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp, totalSize=47.2 K 2024-11-20T11:20:15,903 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e799bbca71d4401ab7f67a48c48e798, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732101610066 2024-11-20T11:20:15,903 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 36925946384d45b9999b183fb573f358, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=112, earliestPutTs=1732101611210 2024-11-20T11:20:15,904 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting e22551da4a7c467693bf45832f7feb57, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732101611823 2024-11-20T11:20:15,904 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 08bd96ef799642b59707353774191f68, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732101613977 2024-11-20T11:20:15,914 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): dda426a558461ab734d5c9192624badc#C#compaction#209 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:15,914 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/45df400150b54ce6a220d470f02b42be is 50, key is test_row_0/C:col10/1732101613978/Put/seqid=0 2024-11-20T11:20:15,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742075_1251 (size=12493) 2024-11-20T11:20:15,925 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/45df400150b54ce6a220d470f02b42be as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/45df400150b54ce6a220d470f02b42be 2024-11-20T11:20:15,931 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in dda426a558461ab734d5c9192624badc/C of dda426a558461ab734d5c9192624badc into 45df400150b54ce6a220d470f02b42be(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:20:15,931 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:15,931 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., storeName=dda426a558461ab734d5c9192624badc/C, priority=12, startTime=1732101615858; duration=0sec 2024-11-20T11:20:15,931 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:15,931 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dda426a558461ab734d5c9192624badc:C 2024-11-20T11:20:15,950 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:15,951 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T11:20:15,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 
2024-11-20T11:20:15,951 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing dda426a558461ab734d5c9192624badc 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T11:20:15,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=A 2024-11-20T11:20:15,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:15,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=B 2024-11-20T11:20:15,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:15,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=C 2024-11-20T11:20:15,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:15,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/c2835f082ec6474a9fcb0fe0b5a803db is 50, key is test_row_0/A:col10/1732101614606/Put/seqid=0 2024-11-20T11:20:15,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742076_1252 (size=12151) 2024-11-20T11:20:16,362 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/c2835f082ec6474a9fcb0fe0b5a803db 2024-11-20T11:20:16,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/14cd88f47ebc42468d51fbe5b8496616 is 50, key is test_row_0/B:col10/1732101614606/Put/seqid=0 2024-11-20T11:20:16,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742077_1253 (size=12151) 2024-11-20T11:20:16,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T11:20:16,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on dda426a558461ab734d5c9192624badc 2024-11-20T11:20:16,725 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. as already flushing 2024-11-20T11:20:16,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:16,750 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:16,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101676749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:16,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101676749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:16,777 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/14cd88f47ebc42468d51fbe5b8496616 2024-11-20T11:20:16,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/a3c7874996864770838585e628053e37 is 50, key is test_row_0/C:col10/1732101614606/Put/seqid=0 2024-11-20T11:20:16,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742078_1254 (size=12151) 2024-11-20T11:20:16,852 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:16,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101676851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:16,852 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:16,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101676851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:17,054 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:17,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101677053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:17,055 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:17,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101677054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:17,192 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/a3c7874996864770838585e628053e37 2024-11-20T11:20:17,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/c2835f082ec6474a9fcb0fe0b5a803db as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/c2835f082ec6474a9fcb0fe0b5a803db 2024-11-20T11:20:17,202 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/c2835f082ec6474a9fcb0fe0b5a803db, entries=150, sequenceid=169, filesize=11.9 K 2024-11-20T11:20:17,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/14cd88f47ebc42468d51fbe5b8496616 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/14cd88f47ebc42468d51fbe5b8496616 2024-11-20T11:20:17,207 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/14cd88f47ebc42468d51fbe5b8496616, entries=150, sequenceid=169, filesize=11.9 K 2024-11-20T11:20:17,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/a3c7874996864770838585e628053e37 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/a3c7874996864770838585e628053e37 2024-11-20T11:20:17,212 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/a3c7874996864770838585e628053e37, entries=150, sequenceid=169, filesize=11.9 K 2024-11-20T11:20:17,213 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for dda426a558461ab734d5c9192624badc in 1261ms, sequenceid=169, compaction requested=false 2024-11-20T11:20:17,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:17,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:17,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-11-20T11:20:17,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-11-20T11:20:17,215 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-11-20T11:20:17,215 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6390 sec 2024-11-20T11:20:17,216 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 2.6430 sec 2024-11-20T11:20:17,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on dda426a558461ab734d5c9192624badc 2024-11-20T11:20:17,358 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing dda426a558461ab734d5c9192624badc 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T11:20:17,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=A 2024-11-20T11:20:17,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:17,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=B 2024-11-20T11:20:17,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:17,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
dda426a558461ab734d5c9192624badc, store=C 2024-11-20T11:20:17,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:17,363 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/125dbb58d4d14c64a38a8c2ad96329e2 is 50, key is test_row_0/A:col10/1732101616743/Put/seqid=0 2024-11-20T11:20:17,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742079_1255 (size=12151) 2024-11-20T11:20:17,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:17,376 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:17,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101677374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:17,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101677374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:17,478 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:17,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101677477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:17,479 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:17,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101677477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:17,681 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:17,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101677680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:17,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:17,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101677680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:17,770 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=192 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/125dbb58d4d14c64a38a8c2ad96329e2 2024-11-20T11:20:17,778 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/ad98f0fba58d45e0b7dd93f6e1d7d63b is 50, key is test_row_0/B:col10/1732101616743/Put/seqid=0 2024-11-20T11:20:17,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742080_1256 (size=12151) 2024-11-20T11:20:17,983 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:17,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101677982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:17,984 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:17,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101677983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:18,185 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=192 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/ad98f0fba58d45e0b7dd93f6e1d7d63b 2024-11-20T11:20:18,192 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/2acd6ac382a34336b52c6724f3103433 is 50, key is test_row_0/C:col10/1732101616743/Put/seqid=0 2024-11-20T11:20:18,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742081_1257 (size=12151) 2024-11-20T11:20:18,485 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:18,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101678484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:18,486 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:18,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101678485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:18,599 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=192 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/2acd6ac382a34336b52c6724f3103433 2024-11-20T11:20:18,603 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/125dbb58d4d14c64a38a8c2ad96329e2 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/125dbb58d4d14c64a38a8c2ad96329e2 2024-11-20T11:20:18,607 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/125dbb58d4d14c64a38a8c2ad96329e2, entries=150, sequenceid=192, filesize=11.9 K 2024-11-20T11:20:18,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/ad98f0fba58d45e0b7dd93f6e1d7d63b as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/ad98f0fba58d45e0b7dd93f6e1d7d63b 2024-11-20T11:20:18,612 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/ad98f0fba58d45e0b7dd93f6e1d7d63b, entries=150, sequenceid=192, filesize=11.9 K 2024-11-20T11:20:18,613 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/2acd6ac382a34336b52c6724f3103433 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/2acd6ac382a34336b52c6724f3103433 2024-11-20T11:20:18,617 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/2acd6ac382a34336b52c6724f3103433, entries=150, sequenceid=192, filesize=11.9 K 2024-11-20T11:20:18,618 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for dda426a558461ab734d5c9192624badc in 1259ms, sequenceid=192, compaction requested=true 2024-11-20T11:20:18,618 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:18,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dda426a558461ab734d5c9192624badc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:20:18,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:18,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dda426a558461ab734d5c9192624badc:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:20:18,618 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:18,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:18,618 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:18,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dda426a558461ab734d5c9192624badc:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:20:18,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:20:18,619 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:18,619 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:18,619 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): dda426a558461ab734d5c9192624badc/A is initiating minor compaction (all files) 2024-11-20T11:20:18,619 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): dda426a558461ab734d5c9192624badc/B is initiating minor compaction (all files) 2024-11-20T11:20:18,619 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dda426a558461ab734d5c9192624badc/A in TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 
2024-11-20T11:20:18,619 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dda426a558461ab734d5c9192624badc/B in TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:18,619 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/3efe9a51b7e64d798ed0d9fcf5da00f6, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/c2835f082ec6474a9fcb0fe0b5a803db, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/125dbb58d4d14c64a38a8c2ad96329e2] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp, totalSize=35.9 K 2024-11-20T11:20:18,619 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/6ebfd336f3de40eaa1ea0725da374f0e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/14cd88f47ebc42468d51fbe5b8496616, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/ad98f0fba58d45e0b7dd93f6e1d7d63b] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp, totalSize=35.9 K 2024-11-20T11:20:18,620 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ebfd336f3de40eaa1ea0725da374f0e, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732101613977 2024-11-20T11:20:18,620 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3efe9a51b7e64d798ed0d9fcf5da00f6, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732101613977 2024-11-20T11:20:18,620 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 14cd88f47ebc42468d51fbe5b8496616, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732101614603 2024-11-20T11:20:18,620 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting c2835f082ec6474a9fcb0fe0b5a803db, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732101614603 2024-11-20T11:20:18,620 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 125dbb58d4d14c64a38a8c2ad96329e2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1732101616743 2024-11-20T11:20:18,620 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting ad98f0fba58d45e0b7dd93f6e1d7d63b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1732101616743 
2024-11-20T11:20:18,628 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): dda426a558461ab734d5c9192624badc#B#compaction#217 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:18,628 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dda426a558461ab734d5c9192624badc#A#compaction#216 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:18,628 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/b18c13624aaf479cb95d36be4c9def52 is 50, key is test_row_0/B:col10/1732101616743/Put/seqid=0 2024-11-20T11:20:18,628 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/52bbc8fd6e4747d9ba7508f95915a89b is 50, key is test_row_0/A:col10/1732101616743/Put/seqid=0 2024-11-20T11:20:18,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742082_1258 (size=12595) 2024-11-20T11:20:18,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742083_1259 (size=12595) 2024-11-20T11:20:18,641 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/52bbc8fd6e4747d9ba7508f95915a89b as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/52bbc8fd6e4747d9ba7508f95915a89b 2024-11-20T11:20:18,645 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dda426a558461ab734d5c9192624badc/A of dda426a558461ab734d5c9192624badc into 52bbc8fd6e4747d9ba7508f95915a89b(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:20:18,645 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:18,645 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., storeName=dda426a558461ab734d5c9192624badc/A, priority=13, startTime=1732101618618; duration=0sec 2024-11-20T11:20:18,645 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:20:18,645 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dda426a558461ab734d5c9192624badc:A 2024-11-20T11:20:18,645 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:18,646 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:18,646 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): dda426a558461ab734d5c9192624badc/C is initiating minor compaction (all files) 2024-11-20T11:20:18,647 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dda426a558461ab734d5c9192624badc/C in TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:18,647 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/45df400150b54ce6a220d470f02b42be, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/a3c7874996864770838585e628053e37, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/2acd6ac382a34336b52c6724f3103433] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp, totalSize=35.9 K 2024-11-20T11:20:18,647 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45df400150b54ce6a220d470f02b42be, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732101613977 2024-11-20T11:20:18,648 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting a3c7874996864770838585e628053e37, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732101614603 2024-11-20T11:20:18,648 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2acd6ac382a34336b52c6724f3103433, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1732101616743 2024-11-20T11:20:18,655 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): dda426a558461ab734d5c9192624badc#C#compaction#218 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:18,656 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/31ac0059b24a4bbabf67874892fb8db7 is 50, key is test_row_0/C:col10/1732101616743/Put/seqid=0 2024-11-20T11:20:18,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742084_1260 (size=12595) 2024-11-20T11:20:18,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T11:20:18,679 INFO [Thread-1031 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-11-20T11:20:18,680 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:20:18,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-11-20T11:20:18,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T11:20:18,681 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:20:18,682 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:20:18,682 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:20:18,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T11:20:18,833 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:18,834 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T11:20:18,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 
2024-11-20T11:20:18,834 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing dda426a558461ab734d5c9192624badc 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T11:20:18,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=A 2024-11-20T11:20:18,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:18,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=B 2024-11-20T11:20:18,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:18,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=C 2024-11-20T11:20:18,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:18,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/7659841c1f804a0b9ddc816c912560c9 is 50, key is test_row_0/A:col10/1732101617372/Put/seqid=0 2024-11-20T11:20:18,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742085_1261 (size=12151) 2024-11-20T11:20:18,844 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/7659841c1f804a0b9ddc816c912560c9 2024-11-20T11:20:18,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/b06f442780c14361860afa717c268628 is 50, key is test_row_0/B:col10/1732101617372/Put/seqid=0 2024-11-20T11:20:18,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742086_1262 (size=12151) 2024-11-20T11:20:18,856 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=206 (bloomFilter=true), 
to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/b06f442780c14361860afa717c268628 2024-11-20T11:20:18,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/2eba759a38764b3a8aadc78561791233 is 50, key is test_row_0/C:col10/1732101617372/Put/seqid=0 2024-11-20T11:20:18,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742087_1263 (size=12151) 2024-11-20T11:20:18,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T11:20:19,042 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/b18c13624aaf479cb95d36be4c9def52 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/b18c13624aaf479cb95d36be4c9def52 2024-11-20T11:20:19,047 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dda426a558461ab734d5c9192624badc/B of dda426a558461ab734d5c9192624badc into b18c13624aaf479cb95d36be4c9def52(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:20:19,047 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:19,047 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., storeName=dda426a558461ab734d5c9192624badc/B, priority=13, startTime=1732101618618; duration=0sec 2024-11-20T11:20:19,048 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:19,048 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dda426a558461ab734d5c9192624badc:B 2024-11-20T11:20:19,067 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/31ac0059b24a4bbabf67874892fb8db7 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/31ac0059b24a4bbabf67874892fb8db7 2024-11-20T11:20:19,073 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dda426a558461ab734d5c9192624badc/C of dda426a558461ab734d5c9192624badc into 31ac0059b24a4bbabf67874892fb8db7(size=12.3 K), total size for store is 12.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:20:19,073 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:19,073 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., storeName=dda426a558461ab734d5c9192624badc/C, priority=13, startTime=1732101618618; duration=0sec 2024-11-20T11:20:19,073 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:19,073 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dda426a558461ab734d5c9192624badc:C 2024-11-20T11:20:19,273 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/2eba759a38764b3a8aadc78561791233 2024-11-20T11:20:19,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/7659841c1f804a0b9ddc816c912560c9 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/7659841c1f804a0b9ddc816c912560c9 2024-11-20T11:20:19,282 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/7659841c1f804a0b9ddc816c912560c9, entries=150, sequenceid=206, filesize=11.9 K 2024-11-20T11:20:19,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/b06f442780c14361860afa717c268628 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/b06f442780c14361860afa717c268628 2024-11-20T11:20:19,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T11:20:19,287 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/b06f442780c14361860afa717c268628, entries=150, sequenceid=206, filesize=11.9 K 2024-11-20T11:20:19,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/2eba759a38764b3a8aadc78561791233 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/2eba759a38764b3a8aadc78561791233 2024-11-20T11:20:19,298 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/2eba759a38764b3a8aadc78561791233, entries=150, sequenceid=206, filesize=11.9 K 2024-11-20T11:20:19,299 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=0 B/0 for dda426a558461ab734d5c9192624badc in 465ms, sequenceid=206, compaction requested=false 2024-11-20T11:20:19,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:19,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:19,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-20T11:20:19,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-11-20T11:20:19,302 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-11-20T11:20:19,302 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 619 msec 2024-11-20T11:20:19,306 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 625 msec 2024-11-20T11:20:19,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on dda426a558461ab734d5c9192624badc 2024-11-20T11:20:19,499 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing dda426a558461ab734d5c9192624badc 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T11:20:19,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=A 2024-11-20T11:20:19,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:19,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=B 2024-11-20T11:20:19,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:19,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING 
TO DISK dda426a558461ab734d5c9192624badc, store=C 2024-11-20T11:20:19,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:19,504 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/3d7ddea78e9746359c63a9c1bf981d4b is 50, key is test_row_0/A:col10/1732101619496/Put/seqid=0 2024-11-20T11:20:19,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742088_1264 (size=12151) 2024-11-20T11:20:19,516 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/3d7ddea78e9746359c63a9c1bf981d4b 2024-11-20T11:20:19,525 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/eab7fb87b4db4544a83c44244184547a is 50, key is test_row_0/B:col10/1732101619496/Put/seqid=0 2024-11-20T11:20:19,527 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:19,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101679526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:19,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:19,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101679528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:19,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742089_1265 (size=12151) 2024-11-20T11:20:19,539 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/eab7fb87b4db4544a83c44244184547a 2024-11-20T11:20:19,546 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/479bdef0c5904b1cb53b2bfced24beb0 is 50, key is test_row_0/C:col10/1732101619496/Put/seqid=0 2024-11-20T11:20:19,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742090_1266 (size=12151) 2024-11-20T11:20:19,630 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:19,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101679629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:19,632 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:19,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101679630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:19,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T11:20:19,785 INFO [Thread-1031 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-11-20T11:20:19,788 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:20:19,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-11-20T11:20:19,790 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:20:19,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T11:20:19,790 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:20:19,790 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:20:19,834 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:19,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101679832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:19,835 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:19,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101679834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:19,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T11:20:19,942 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:19,942 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-20T11:20:19,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:19,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. as already flushing 2024-11-20T11:20:19,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:19,943 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
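The failed pid=88 above is the region server telling the master that the region is already mid-flush; the master keeps the FlushRegionProcedure alive and re-dispatches it later in the log. The whole chain starts with an ordinary client-side flush request, logged earlier as "Client=jenkins//172.17.0.2 flush TestAcidGuarantees". Below is a minimal sketch of issuing such a request with the standard HBase Admin API; the connection settings are assumptions and are not taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumed quorum address; the mini-cluster above runs on ephemeral ports.
        conf.set("hbase.zookeeper.quorum", "localhost");

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Admin.flush() submits a FlushTableProcedure on the master (pid=85/87 above)
            // and blocks until the procedure completes, matching the
            // "Operation: FLUSH ... procId: 85 completed" record in this log.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}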
2024-11-20T11:20:19,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:19,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:19,958 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/479bdef0c5904b1cb53b2bfced24beb0 2024-11-20T11:20:19,963 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/3d7ddea78e9746359c63a9c1bf981d4b as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/3d7ddea78e9746359c63a9c1bf981d4b 2024-11-20T11:20:19,968 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/3d7ddea78e9746359c63a9c1bf981d4b, entries=150, sequenceid=219, filesize=11.9 K 2024-11-20T11:20:19,969 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/eab7fb87b4db4544a83c44244184547a as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/eab7fb87b4db4544a83c44244184547a 2024-11-20T11:20:19,973 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/eab7fb87b4db4544a83c44244184547a, entries=150, sequenceid=219, filesize=11.9 K 2024-11-20T11:20:19,974 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/479bdef0c5904b1cb53b2bfced24beb0 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/479bdef0c5904b1cb53b2bfced24beb0 2024-11-20T11:20:19,978 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/479bdef0c5904b1cb53b2bfced24beb0, entries=150, sequenceid=219, filesize=11.9 K 2024-11-20T11:20:19,979 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for dda426a558461ab734d5c9192624badc in 480ms, sequenceid=219, compaction requested=true 2024-11-20T11:20:19,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:19,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
dda426a558461ab734d5c9192624badc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:20:19,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:19,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dda426a558461ab734d5c9192624badc:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:20:19,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:19,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dda426a558461ab734d5c9192624badc:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:20:19,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:20:19,979 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:19,979 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:19,980 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:19,980 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:19,980 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): dda426a558461ab734d5c9192624badc/B is initiating minor compaction (all files) 2024-11-20T11:20:19,980 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): dda426a558461ab734d5c9192624badc/A is initiating minor compaction (all files) 2024-11-20T11:20:19,980 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dda426a558461ab734d5c9192624badc/B in TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:19,980 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dda426a558461ab734d5c9192624badc/A in TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 
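At this point each of the three stores (A, B, C) again holds three HFiles, one from each of the last three flushes, so CompactSplit queues minor compactions and the ExploringCompactionPolicy selects all three files, 36897 bytes in total. The snippet below is a deliberately simplified, hypothetical sketch of ratio-based selection to show the shape of that decision; it is not the real ExploringCompactionPolicy implementation.

import java.util.ArrayList;
import java.util.List;

public class RatioSelectionSketch {

    // Returns the chosen window of file sizes, or an empty list if nothing qualifies.
    // A window is acceptable when its largest file is not much bigger than the sum
    // of the other files it would be compacted with (the "ratio" check).
    static List<Long> select(List<Long> fileSizes, int minFiles, int maxFiles, double ratio) {
        List<Long> best = new ArrayList<>();
        for (int start = 0; start < fileSizes.size(); start++) {
            for (int end = start + minFiles;
                 end <= Math.min(fileSizes.size(), start + maxFiles); end++) {
                List<Long> window = fileSizes.subList(start, end);
                long total = window.stream().mapToLong(Long::longValue).sum();
                long largest = window.stream().mapToLong(Long::longValue).max().orElse(0);
                if (largest <= (total - largest) * ratio && window.size() > best.size()) {
                    best = new ArrayList<>(window);
                }
            }
        }
        return best;
    }

    public static void main(String[] args) {
        // Roughly the file sizes from the log: 12.3 K + 11.9 K + 11.9 K = 36897 bytes.
        List<Long> sizes = List.of(12_595L, 12_151L, 12_151L);
        System.out.println(select(sizes, 3, 10, 1.2)); // selects all three files
    }
}

The real policy additionally honours settings such as hbase.hstore.compaction.min and hbase.hstore.compaction.max, and the resulting write is throttled, which is what the PressureAwareThroughputController records below report.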
2024-11-20T11:20:19,980 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/52bbc8fd6e4747d9ba7508f95915a89b, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/7659841c1f804a0b9ddc816c912560c9, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/3d7ddea78e9746359c63a9c1bf981d4b] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp, totalSize=36.0 K 2024-11-20T11:20:19,980 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/b18c13624aaf479cb95d36be4c9def52, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/b06f442780c14361860afa717c268628, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/eab7fb87b4db4544a83c44244184547a] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp, totalSize=36.0 K 2024-11-20T11:20:19,981 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52bbc8fd6e4747d9ba7508f95915a89b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1732101616743 2024-11-20T11:20:19,981 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting b18c13624aaf479cb95d36be4c9def52, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1732101616743 2024-11-20T11:20:19,981 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7659841c1f804a0b9ddc816c912560c9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732101617365 2024-11-20T11:20:19,981 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting b06f442780c14361860afa717c268628, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732101617365 2024-11-20T11:20:19,982 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3d7ddea78e9746359c63a9c1bf981d4b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1732101619493 2024-11-20T11:20:19,982 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting eab7fb87b4db4544a83c44244184547a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1732101619493 2024-11-20T11:20:19,989 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dda426a558461ab734d5c9192624badc#A#compaction#225 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:19,990 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): dda426a558461ab734d5c9192624badc#B#compaction#226 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:19,990 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/19a4913d54e24330ac82fbd6483112bf is 50, key is test_row_0/A:col10/1732101619496/Put/seqid=0 2024-11-20T11:20:19,990 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/7846313f6752440cb775e26552a031b7 is 50, key is test_row_0/B:col10/1732101619496/Put/seqid=0 2024-11-20T11:20:19,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742091_1267 (size=12697) 2024-11-20T11:20:20,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742092_1268 (size=12697) 2024-11-20T11:20:20,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T11:20:20,096 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:20,096 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-20T11:20:20,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 
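The re-dispatched pid=88 now flushes all 3/3 column families, and the CompactingMemStore records show each store's in-memory pipeline being drained ("Swapping pipeline suffix") before the snapshot is written. The test's actual table definition is not part of this log, but a table with the same store layout could be declared roughly as follows; the family names match the log, while the in-memory compaction policy and everything else are assumptions.

import java.io.IOException;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class AcidTableSchemaSketch {
    // Hypothetical schema matching the stores seen in this log (A, B, C). The real
    // TestAcidGuarantees setup lives in the HBase test sources; this only shows how a
    // CompactingMemStore is enabled per family via the in-memory compaction policy.
    static void createTable(Admin admin) throws IOException {
        TableDescriptorBuilder table =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
        for (String family : new String[] {"A", "B", "C"}) {
            table.setColumnFamily(
                ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                    // BASIC keeps flushed-in-memory segments in a pipeline; the
                    // "Swapping pipeline suffix" messages above are that pipeline
                    // being swapped out when the store flushes to disk.
                    .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                    .build());
        }
        admin.createTable(table.build());
    }
}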
2024-11-20T11:20:20,097 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing dda426a558461ab734d5c9192624badc 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T11:20:20,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=A 2024-11-20T11:20:20,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:20,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=B 2024-11-20T11:20:20,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:20,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=C 2024-11-20T11:20:20,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:20,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/c2ff44303a094818be5b7aeffe883749 is 50, key is test_row_0/A:col10/1732101619522/Put/seqid=0 2024-11-20T11:20:20,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742093_1269 (size=12151) 2024-11-20T11:20:20,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on dda426a558461ab734d5c9192624badc 2024-11-20T11:20:20,139 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. as already flushing 2024-11-20T11:20:20,152 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:20,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101680150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:20,152 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:20,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101680150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:20,254 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:20,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101680253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:20,255 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:20,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101680253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:20,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T11:20:20,402 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/19a4913d54e24330ac82fbd6483112bf as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/19a4913d54e24330ac82fbd6483112bf 2024-11-20T11:20:20,407 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dda426a558461ab734d5c9192624badc/A of dda426a558461ab734d5c9192624badc into 19a4913d54e24330ac82fbd6483112bf(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
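The RegionTooBusyException WARNs interleaved with the compaction records are the server refusing new mutations while the region's memstore is above its blocking limit; the CallRunner lines show the exception being returned to the client with a fresh deadline, and the client retrying until a flush frees space. A minimal sketch of the client-side knobs that govern that retry loop follows; the values are illustrative, not the ones used by this test.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriterSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Retry/backoff knobs (illustrative values):
        conf.setInt("hbase.client.retries.number", 15);  // attempts before giving up
        conf.setLong("hbase.client.pause", 100);         // base pause in ms between retries

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            // A RegionTooBusyException returned by the server (as in the WARNs above)
            // is treated as retriable: the client backs off and retries, and the call
            // only fails once the retry budget or operation timeout is exhausted.
            table.put(put);
        }
    }
}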
2024-11-20T11:20:20,407 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:20,407 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., storeName=dda426a558461ab734d5c9192624badc/A, priority=13, startTime=1732101619979; duration=0sec 2024-11-20T11:20:20,407 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:20:20,407 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dda426a558461ab734d5c9192624badc:A 2024-11-20T11:20:20,407 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:20,408 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:20,409 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): dda426a558461ab734d5c9192624badc/C is initiating minor compaction (all files) 2024-11-20T11:20:20,409 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dda426a558461ab734d5c9192624badc/C in TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:20,409 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/31ac0059b24a4bbabf67874892fb8db7, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/2eba759a38764b3a8aadc78561791233, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/479bdef0c5904b1cb53b2bfced24beb0] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp, totalSize=36.0 K 2024-11-20T11:20:20,409 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 31ac0059b24a4bbabf67874892fb8db7, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1732101616743 2024-11-20T11:20:20,410 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2eba759a38764b3a8aadc78561791233, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732101617365 2024-11-20T11:20:20,410 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 479bdef0c5904b1cb53b2bfced24beb0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1732101619493 2024-11-20T11:20:20,411 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/7846313f6752440cb775e26552a031b7 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/7846313f6752440cb775e26552a031b7 2024-11-20T11:20:20,432 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dda426a558461ab734d5c9192624badc/B of dda426a558461ab734d5c9192624badc into 7846313f6752440cb775e26552a031b7(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:20:20,432 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:20,432 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., storeName=dda426a558461ab734d5c9192624badc/B, priority=13, startTime=1732101619979; duration=0sec 2024-11-20T11:20:20,432 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:20,432 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dda426a558461ab734d5c9192624badc:B 2024-11-20T11:20:20,433 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dda426a558461ab734d5c9192624badc#C#compaction#228 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:20,434 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/479654a73b114e8e9c2135fe0bced56f is 50, key is test_row_0/C:col10/1732101619496/Put/seqid=0 2024-11-20T11:20:20,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742094_1270 (size=12697) 2024-11-20T11:20:20,443 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/479654a73b114e8e9c2135fe0bced56f as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/479654a73b114e8e9c2135fe0bced56f 2024-11-20T11:20:20,448 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dda426a558461ab734d5c9192624badc/C of dda426a558461ab734d5c9192624badc into 479654a73b114e8e9c2135fe0bced56f(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
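The limit quoted in those warnings ("Over memstore limit=512.0 K") is the region's blocking threshold: the configured memstore flush size multiplied by the block multiplier, checked in HRegion.checkResources before each mutation. The values below simply reproduce a 512 K blocking limit for illustration; they are not read from this test's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative settings for a tiny test region: flush at 128 K, block writes at 4x.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        System.out.println("writes rejected above " + blockingLimit + " bytes"); // 524288 = 512 K
    }
}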
2024-11-20T11:20:20,448 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:20,448 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., storeName=dda426a558461ab734d5c9192624badc/C, priority=13, startTime=1732101619979; duration=0sec 2024-11-20T11:20:20,448 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:20,448 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dda426a558461ab734d5c9192624badc:C 2024-11-20T11:20:20,457 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:20,457 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:20,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101680457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:20,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101680457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:20,506 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/c2ff44303a094818be5b7aeffe883749 2024-11-20T11:20:20,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/21559dfb507649508673238bed7b7a07 is 50, key is test_row_0/B:col10/1732101619522/Put/seqid=0 2024-11-20T11:20:20,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742095_1271 (size=12151) 2024-11-20T11:20:20,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:20,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101680759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:20,760 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:20,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101680759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:20,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T11:20:20,925 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/21559dfb507649508673238bed7b7a07 2024-11-20T11:20:20,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/27ec6ae3f6e443be86c1b2259d8e3c1f is 50, key is test_row_0/C:col10/1732101619522/Put/seqid=0 2024-11-20T11:20:20,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742096_1272 (size=12151) 2024-11-20T11:20:21,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:21,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101681262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:21,265 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:21,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101681264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:21,346 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/27ec6ae3f6e443be86c1b2259d8e3c1f 2024-11-20T11:20:21,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/c2ff44303a094818be5b7aeffe883749 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/c2ff44303a094818be5b7aeffe883749 2024-11-20T11:20:21,362 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/c2ff44303a094818be5b7aeffe883749, entries=150, sequenceid=244, filesize=11.9 K 2024-11-20T11:20:21,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/21559dfb507649508673238bed7b7a07 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/21559dfb507649508673238bed7b7a07 2024-11-20T11:20:21,368 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/21559dfb507649508673238bed7b7a07, entries=150, sequenceid=244, filesize=11.9 K 2024-11-20T11:20:21,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/27ec6ae3f6e443be86c1b2259d8e3c1f as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/27ec6ae3f6e443be86c1b2259d8e3c1f 2024-11-20T11:20:21,373 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/27ec6ae3f6e443be86c1b2259d8e3c1f, entries=150, sequenceid=244, filesize=11.9 K 2024-11-20T11:20:21,374 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for dda426a558461ab734d5c9192624badc in 1277ms, sequenceid=244, compaction requested=false 2024-11-20T11:20:21,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:21,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:21,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-11-20T11:20:21,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-11-20T11:20:21,376 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-11-20T11:20:21,376 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5850 sec 2024-11-20T11:20:21,378 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 1.5890 sec 2024-11-20T11:20:21,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T11:20:21,894 INFO [Thread-1031 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-11-20T11:20:21,895 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:20:21,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees 2024-11-20T11:20:21,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-20T11:20:21,897 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:20:21,897 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:20:21,897 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:20:21,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-20T11:20:22,049 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:22,050 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-20T11:20:22,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:22,050 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing dda426a558461ab734d5c9192624badc 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T11:20:22,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=A 2024-11-20T11:20:22,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:22,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=B 2024-11-20T11:20:22,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:22,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=C 2024-11-20T11:20:22,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:22,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/c936efe0993c4bffae5d648e1841150f is 50, key is test_row_0/A:col10/1732101620149/Put/seqid=0 2024-11-20T11:20:22,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742097_1273 (size=12151) 
2024-11-20T11:20:22,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-20T11:20:22,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on dda426a558461ab734d5c9192624badc 2024-11-20T11:20:22,269 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. as already flushing 2024-11-20T11:20:22,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:22,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101682299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:22,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:22,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101682316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:22,404 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:22,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101682403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:22,419 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:22,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101682418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:22,461 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/c936efe0993c4bffae5d648e1841150f 2024-11-20T11:20:22,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/09ec76056c594a21b02298ba46cb6ac1 is 50, key is test_row_0/B:col10/1732101620149/Put/seqid=0 2024-11-20T11:20:22,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742098_1274 (size=12151) 2024-11-20T11:20:22,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-20T11:20:22,607 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:22,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101682605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:22,622 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:22,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101682620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:22,873 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/09ec76056c594a21b02298ba46cb6ac1 2024-11-20T11:20:22,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/b4d82bd37d36408fa300855194d7c0fd is 50, key is test_row_0/C:col10/1732101620149/Put/seqid=0 2024-11-20T11:20:22,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742099_1275 (size=12151) 2024-11-20T11:20:22,909 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:22,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101682908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:22,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:22,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101682924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:23,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-20T11:20:23,288 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/b4d82bd37d36408fa300855194d7c0fd 2024-11-20T11:20:23,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/c936efe0993c4bffae5d648e1841150f as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/c936efe0993c4bffae5d648e1841150f 2024-11-20T11:20:23,297 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/c936efe0993c4bffae5d648e1841150f, entries=150, sequenceid=258, filesize=11.9 K 2024-11-20T11:20:23,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/09ec76056c594a21b02298ba46cb6ac1 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/09ec76056c594a21b02298ba46cb6ac1 2024-11-20T11:20:23,301 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/09ec76056c594a21b02298ba46cb6ac1, entries=150, sequenceid=258, filesize=11.9 K 2024-11-20T11:20:23,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/b4d82bd37d36408fa300855194d7c0fd as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/b4d82bd37d36408fa300855194d7c0fd 2024-11-20T11:20:23,306 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/b4d82bd37d36408fa300855194d7c0fd, entries=150, sequenceid=258, filesize=11.9 K 2024-11-20T11:20:23,307 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for dda426a558461ab734d5c9192624badc in 1257ms, sequenceid=258, compaction requested=true 2024-11-20T11:20:23,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:23,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:23,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-11-20T11:20:23,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-11-20T11:20:23,310 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-11-20T11:20:23,310 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4110 sec 2024-11-20T11:20:23,311 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees in 1.4150 sec 2024-11-20T11:20:23,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on dda426a558461ab734d5c9192624badc 2024-11-20T11:20:23,416 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing dda426a558461ab734d5c9192624badc 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T11:20:23,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=A 2024-11-20T11:20:23,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:23,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=B 2024-11-20T11:20:23,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:23,417 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=C 2024-11-20T11:20:23,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:23,422 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/df076787b998443bb36b54718ac17694 is 50, key is test_row_0/A:col10/1732101622287/Put/seqid=0 2024-11-20T11:20:23,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742100_1276 (size=14741) 2024-11-20T11:20:23,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:23,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101683429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:23,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:23,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101683430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:23,533 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:23,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101683532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:23,535 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:23,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101683534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:23,736 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:23,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101683735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:23,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:23,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101683738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:23,827 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/df076787b998443bb36b54718ac17694 2024-11-20T11:20:23,836 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/06fb3a40031b4b3aaa80b537806e1738 is 50, key is test_row_0/B:col10/1732101622287/Put/seqid=0 2024-11-20T11:20:23,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742101_1277 (size=12301) 2024-11-20T11:20:24,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-20T11:20:24,001 INFO [Thread-1031 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-11-20T11:20:24,002 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:20:24,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees 2024-11-20T11:20:24,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 
2024-11-20T11:20:24,004 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:20:24,004 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:20:24,004 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:20:24,038 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:24,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101684037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:24,044 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:24,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101684042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:24,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-20T11:20:24,156 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:24,156 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-20T11:20:24,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:24,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. as already flushing 2024-11-20T11:20:24,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:24,157 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
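For context on procedures 91 through 94 in this log: the jenkins client asks the master to flush TestAcidGuarantees, the master stores a FlushTableProcedure (pid=91), and that procedure fans out a FlushRegionProcedure (pid=92) to the region server. Because the region is already flushing, FlushRegionCallable raises the "Unable to complete flush" IOException seen above and the master re-dispatches the subprocedure until it succeeds (pid=92 finishes at 11:20:24,429 further down). A minimal, hedged sketch of the client side of such a request, assuming a standard HBase 2.x client on the classpath:

// Illustrative only: how a client/test triggers the kind of table flush seen
// as "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" in this log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The master stores a FlushTableProcedure (e.g. pid=91 above) and runs one
      // FlushRegionProcedure per region; the call returns when the procedure completes.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}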
2024-11-20T11:20:24,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:24,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:24,242 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/06fb3a40031b4b3aaa80b537806e1738 2024-11-20T11:20:24,249 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/7e16e50135784fb5a2fbc1dcd5f64c7e is 50, key is test_row_0/C:col10/1732101622287/Put/seqid=0 2024-11-20T11:20:24,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742102_1278 (size=12301) 2024-11-20T11:20:24,254 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/7e16e50135784fb5a2fbc1dcd5f64c7e 2024-11-20T11:20:24,259 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/df076787b998443bb36b54718ac17694 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/df076787b998443bb36b54718ac17694 2024-11-20T11:20:24,262 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/df076787b998443bb36b54718ac17694, entries=200, sequenceid=285, filesize=14.4 K 2024-11-20T11:20:24,263 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/06fb3a40031b4b3aaa80b537806e1738 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/06fb3a40031b4b3aaa80b537806e1738 2024-11-20T11:20:24,267 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/06fb3a40031b4b3aaa80b537806e1738, entries=150, sequenceid=285, filesize=12.0 K 2024-11-20T11:20:24,268 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/7e16e50135784fb5a2fbc1dcd5f64c7e as 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/7e16e50135784fb5a2fbc1dcd5f64c7e 2024-11-20T11:20:24,272 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/7e16e50135784fb5a2fbc1dcd5f64c7e, entries=150, sequenceid=285, filesize=12.0 K 2024-11-20T11:20:24,273 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for dda426a558461ab734d5c9192624badc in 857ms, sequenceid=285, compaction requested=true 2024-11-20T11:20:24,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:24,273 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dda426a558461ab734d5c9192624badc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:20:24,273 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:24,273 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dda426a558461ab734d5c9192624badc:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:20:24,273 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:24,273 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dda426a558461ab734d5c9192624badc:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:20:24,273 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T11:20:24,273 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T11:20:24,273 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:20:24,275 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49300 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T11:20:24,275 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): dda426a558461ab734d5c9192624badc/B is initiating minor compaction (all files) 2024-11-20T11:20:24,275 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dda426a558461ab734d5c9192624badc/B in TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 
2024-11-20T11:20:24,275 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/7846313f6752440cb775e26552a031b7, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/21559dfb507649508673238bed7b7a07, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/09ec76056c594a21b02298ba46cb6ac1, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/06fb3a40031b4b3aaa80b537806e1738] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp, totalSize=48.1 K 2024-11-20T11:20:24,276 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51740 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T11:20:24,276 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 7846313f6752440cb775e26552a031b7, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1732101619493 2024-11-20T11:20:24,276 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): dda426a558461ab734d5c9192624badc/A is initiating minor compaction (all files) 2024-11-20T11:20:24,276 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dda426a558461ab734d5c9192624badc/A in TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 
2024-11-20T11:20:24,276 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/19a4913d54e24330ac82fbd6483112bf, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/c2ff44303a094818be5b7aeffe883749, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/c936efe0993c4bffae5d648e1841150f, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/df076787b998443bb36b54718ac17694] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp, totalSize=50.5 K 2024-11-20T11:20:24,276 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 21559dfb507649508673238bed7b7a07, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1732101619522 2024-11-20T11:20:24,276 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 19a4913d54e24330ac82fbd6483112bf, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1732101619493 2024-11-20T11:20:24,276 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 09ec76056c594a21b02298ba46cb6ac1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1732101620138 2024-11-20T11:20:24,277 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting c2ff44303a094818be5b7aeffe883749, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1732101619522 2024-11-20T11:20:24,277 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 06fb3a40031b4b3aaa80b537806e1738, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732101622287 2024-11-20T11:20:24,277 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting c936efe0993c4bffae5d648e1841150f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1732101620138 2024-11-20T11:20:24,278 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting df076787b998443bb36b54718ac17694, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732101622287 2024-11-20T11:20:24,289 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dda426a558461ab734d5c9192624badc#A#compaction#238 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:24,290 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/50c726a5e3c44465b39e8c14873ae422 is 50, key is test_row_0/A:col10/1732101622287/Put/seqid=0 2024-11-20T11:20:24,293 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): dda426a558461ab734d5c9192624badc#B#compaction#237 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:24,293 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/2a528400f53d4f3dbe666f1b970fd51c is 50, key is test_row_0/B:col10/1732101622287/Put/seqid=0 2024-11-20T11:20:24,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-20T11:20:24,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742103_1279 (size=12983) 2024-11-20T11:20:24,309 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:24,309 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-20T11:20:24,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 
2024-11-20T11:20:24,310 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2837): Flushing dda426a558461ab734d5c9192624badc 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-20T11:20:24,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=A 2024-11-20T11:20:24,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:24,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=B 2024-11-20T11:20:24,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:24,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=C 2024-11-20T11:20:24,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:24,313 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/50c726a5e3c44465b39e8c14873ae422 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/50c726a5e3c44465b39e8c14873ae422 2024-11-20T11:20:24,318 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in dda426a558461ab734d5c9192624badc/A of dda426a558461ab734d5c9192624badc into 50c726a5e3c44465b39e8c14873ae422(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:20:24,318 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:24,318 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., storeName=dda426a558461ab734d5c9192624badc/A, priority=12, startTime=1732101624273; duration=0sec 2024-11-20T11:20:24,318 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:20:24,318 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dda426a558461ab734d5c9192624badc:A 2024-11-20T11:20:24,318 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T11:20:24,320 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49300 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T11:20:24,320 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): dda426a558461ab734d5c9192624badc/C is initiating minor compaction (all files) 2024-11-20T11:20:24,320 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dda426a558461ab734d5c9192624badc/C in TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:24,320 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/479654a73b114e8e9c2135fe0bced56f, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/27ec6ae3f6e443be86c1b2259d8e3c1f, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/b4d82bd37d36408fa300855194d7c0fd, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/7e16e50135784fb5a2fbc1dcd5f64c7e] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp, totalSize=48.1 K 2024-11-20T11:20:24,321 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 479654a73b114e8e9c2135fe0bced56f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1732101619493 2024-11-20T11:20:24,321 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 27ec6ae3f6e443be86c1b2259d8e3c1f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1732101619522 2024-11-20T11:20:24,322 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting b4d82bd37d36408fa300855194d7c0fd, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1732101620138 2024-11-20T11:20:24,322 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e16e50135784fb5a2fbc1dcd5f64c7e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732101622287 2024-11-20T11:20:24,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/0c27465d31184da29f96fd2cda5a42c7 is 50, key is test_row_0/A:col10/1732101623426/Put/seqid=0 2024-11-20T11:20:24,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742104_1280 (size=12983) 2024-11-20T11:20:24,332 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dda426a558461ab734d5c9192624badc#C#compaction#240 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:24,333 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/babdfa65a2d04adb89531e4af3aed47c is 50, key is test_row_0/C:col10/1732101622287/Put/seqid=0 2024-11-20T11:20:24,340 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/2a528400f53d4f3dbe666f1b970fd51c as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/2a528400f53d4f3dbe666f1b970fd51c 2024-11-20T11:20:24,345 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in dda426a558461ab734d5c9192624badc/B of dda426a558461ab734d5c9192624badc into 2a528400f53d4f3dbe666f1b970fd51c(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:20:24,345 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:24,345 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., storeName=dda426a558461ab734d5c9192624badc/B, priority=12, startTime=1732101624273; duration=0sec 2024-11-20T11:20:24,346 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:24,346 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dda426a558461ab734d5c9192624badc:B 2024-11-20T11:20:24,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742105_1281 (size=12301) 2024-11-20T11:20:24,351 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/0c27465d31184da29f96fd2cda5a42c7 2024-11-20T11:20:24,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742106_1282 (size=12983) 2024-11-20T11:20:24,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/0045d1dda5c3487b9679261bdd926606 is 50, key is test_row_0/B:col10/1732101623426/Put/seqid=0 2024-11-20T11:20:24,374 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/babdfa65a2d04adb89531e4af3aed47c as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/babdfa65a2d04adb89531e4af3aed47c 2024-11-20T11:20:24,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742107_1283 (size=12301) 2024-11-20T11:20:24,378 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/0045d1dda5c3487b9679261bdd926606 2024-11-20T11:20:24,381 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in dda426a558461ab734d5c9192624badc/C of dda426a558461ab734d5c9192624badc into babdfa65a2d04adb89531e4af3aed47c(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
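The three minor compactions above each select all four eligible HFiles per store ("Exploring compaction algorithm has selected 4 files ... after considering 3 permutations"). The core eligibility test is a size-ratio check: no file in the chosen set may be much larger than the rest of the set combined. The snippet below is a deliberately simplified, hypothetical illustration of that check, not the actual ExploringCompactionPolicy (which also honors min/max file counts, off-peak ratios and throttling); the sizes are rough byte equivalents of the B-store files listed in the log.

import java.util.List;

public class RatioSelectionSketch {
  // A candidate set passes if no file is larger than `ratio` times the sum of
  // the other files in the set (the default HBase compaction ratio is 1.2).
  static boolean withinRatio(List<Long> sizes, double ratio) {
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long size : sizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Roughly the four B-store files from the log: 12.4 K, 11.9 K, 11.9 K, 12.0 K.
    List<Long> sizes = List.of(12_700L, 12_200L, 12_200L, 12_300L);
    System.out.println("eligible = " + withinRatio(sizes, 1.2)); // prints true
  }
}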
2024-11-20T11:20:24,381 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:24,381 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., storeName=dda426a558461ab734d5c9192624badc/C, priority=12, startTime=1732101624273; duration=0sec 2024-11-20T11:20:24,382 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:24,382 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dda426a558461ab734d5c9192624badc:C 2024-11-20T11:20:24,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/087bcfc5ec8a4715aed960eb4f226557 is 50, key is test_row_0/C:col10/1732101623426/Put/seqid=0 2024-11-20T11:20:24,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742108_1284 (size=12301) 2024-11-20T11:20:24,400 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/087bcfc5ec8a4715aed960eb4f226557 2024-11-20T11:20:24,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/0c27465d31184da29f96fd2cda5a42c7 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/0c27465d31184da29f96fd2cda5a42c7 2024-11-20T11:20:24,412 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/0c27465d31184da29f96fd2cda5a42c7, entries=150, sequenceid=294, filesize=12.0 K 2024-11-20T11:20:24,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/0045d1dda5c3487b9679261bdd926606 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/0045d1dda5c3487b9679261bdd926606 2024-11-20T11:20:24,418 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/0045d1dda5c3487b9679261bdd926606, entries=150, sequenceid=294, filesize=12.0 K 2024-11-20T11:20:24,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/087bcfc5ec8a4715aed960eb4f226557 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/087bcfc5ec8a4715aed960eb4f226557 2024-11-20T11:20:24,424 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/087bcfc5ec8a4715aed960eb4f226557, entries=150, sequenceid=294, filesize=12.0 K 2024-11-20T11:20:24,425 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=0 B/0 for dda426a558461ab734d5c9192624badc in 115ms, sequenceid=294, compaction requested=false 2024-11-20T11:20:24,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2538): Flush status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:24,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 
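The flush entries above show the usual two-step commit: each new HFile is first written under the region's .tmp directory (e.g. .tmp/A/0c27465d31184da29f96fd2cda5a42c7) and only then moved into the store directory by HRegionFileSystem, so readers never observe a partially written file. A minimal sketch of that temp-then-rename pattern on HDFS follows; the paths and filenames are placeholders modelled on the log, and the real logic lives in HRegionFileSystem rather than in code like this.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommitSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder namenode and paths, loosely modelled on the log's layout.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:43109/"), new Configuration());
    Path tmp = new Path("/user/jenkins/region/.tmp/A/newfile");
    Path committed = new Path("/user/jenkins/region/A/newfile");
    try (FSDataOutputStream out = fs.create(tmp)) {
      out.writeBytes("flushed cells would go here");  // write the whole file first
    }
    fs.mkdirs(committed.getParent());
    if (!fs.rename(tmp, committed)) {                 // single rename makes it visible
      throw new java.io.IOException("commit failed");
    }
  }
}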
2024-11-20T11:20:24,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=92 2024-11-20T11:20:24,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=92 2024-11-20T11:20:24,429 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-11-20T11:20:24,429 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 423 msec 2024-11-20T11:20:24,431 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees in 427 msec 2024-11-20T11:20:24,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on dda426a558461ab734d5c9192624badc 2024-11-20T11:20:24,552 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing dda426a558461ab734d5c9192624badc 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T11:20:24,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=A 2024-11-20T11:20:24,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:24,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=B 2024-11-20T11:20:24,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:24,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=C 2024-11-20T11:20:24,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:24,558 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/ff3e8e4dee1d4ac6a18ce52195bc61f7 is 50, key is test_row_0/A:col10/1732101624552/Put/seqid=0 2024-11-20T11:20:24,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742109_1285 (size=12301) 2024-11-20T11:20:24,581 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:24,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101684579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:24,581 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:24,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101684580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:24,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-20T11:20:24,607 INFO [Thread-1031 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-11-20T11:20:24,608 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:20:24,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=93, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees 2024-11-20T11:20:24,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-20T11:20:24,610 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=93, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:20:24,610 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=93, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:20:24,610 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:20:24,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:24,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101684682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:24,684 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:24,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101684682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:24,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-20T11:20:24,762 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:24,762 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-11-20T11:20:24,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:24,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. as already flushing 2024-11-20T11:20:24,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:24,763 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:20:24,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:24,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:24,886 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:24,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101684885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:24,886 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:24,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101684885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:24,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-20T11:20:24,915 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:24,915 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-11-20T11:20:24,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:24,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. as already flushing 2024-11-20T11:20:24,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:24,915 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
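[Editor's note] The interleaved failures above are two sides of the same backpressure: the region's memstore has hit its blocking limit (512.0 K in this run), so incoming Mutate calls are rejected with RegionTooBusyException, while the master's flush procedure pid=94 keeps failing with "Unable to complete flush ... as already flushing" because a flush of the same region is still in progress. The sketch below is a minimal, hypothetical client-side illustration of that situation: it issues a put and, on RegionTooBusyException, requests a flush and backs off before retrying. The table, row, family and qualifier names are taken from the log; the retry budget and sleep values are invented, and the stock HBase client already performs this kind of retry internally, as the RpcRetryingCallerImpl entries further down show.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees"); // table seen in the log above
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin();
         Table t = conn.getTable(table)) {
      // Row/family/qualifier mirror the cells in the flush output ("test_row_0/A:col10/...").
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; attempt <= 5; attempt++) {   // hypothetical retry budget
        try {
          t.put(put);
          break;
        } catch (RegionTooBusyException overLimit) {
          // Memstore is over its blocking limit; ask for a flush and back off before retrying.
          admin.flush(table);
          Thread.sleep(200L * attempt);                  // illustrative backoff only
        }
      }
    }
  }
}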
2024-11-20T11:20:24,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:24,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:24,963 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=309 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/ff3e8e4dee1d4ac6a18ce52195bc61f7 2024-11-20T11:20:24,970 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/d2fd4619e3d849cabc2abc12ed1ad834 is 50, key is test_row_0/B:col10/1732101624552/Put/seqid=0 2024-11-20T11:20:24,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742110_1286 (size=12301) 2024-11-20T11:20:24,974 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=309 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/d2fd4619e3d849cabc2abc12ed1ad834 2024-11-20T11:20:24,981 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/fcd02ef0c0464c10baa4297b56015880 is 50, key is test_row_0/C:col10/1732101624552/Put/seqid=0 2024-11-20T11:20:24,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742111_1287 (size=12301) 2024-11-20T11:20:24,990 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=309 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/fcd02ef0c0464c10baa4297b56015880 2024-11-20T11:20:24,994 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/ff3e8e4dee1d4ac6a18ce52195bc61f7 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/ff3e8e4dee1d4ac6a18ce52195bc61f7 2024-11-20T11:20:24,998 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/ff3e8e4dee1d4ac6a18ce52195bc61f7, entries=150, sequenceid=309, filesize=12.0 K 2024-11-20T11:20:24,999 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/d2fd4619e3d849cabc2abc12ed1ad834 as 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/d2fd4619e3d849cabc2abc12ed1ad834 2024-11-20T11:20:25,002 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/d2fd4619e3d849cabc2abc12ed1ad834, entries=150, sequenceid=309, filesize=12.0 K 2024-11-20T11:20:25,003 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/fcd02ef0c0464c10baa4297b56015880 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/fcd02ef0c0464c10baa4297b56015880 2024-11-20T11:20:25,006 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/fcd02ef0c0464c10baa4297b56015880, entries=150, sequenceid=309, filesize=12.0 K 2024-11-20T11:20:25,007 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for dda426a558461ab734d5c9192624badc in 455ms, sequenceid=309, compaction requested=true 2024-11-20T11:20:25,007 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:25,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dda426a558461ab734d5c9192624badc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:20:25,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:25,008 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:25,008 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:25,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dda426a558461ab734d5c9192624badc:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:20:25,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:25,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dda426a558461ab734d5c9192624badc:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:20:25,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:20:25,009 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:25,009 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:25,009 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): dda426a558461ab734d5c9192624badc/B is initiating minor compaction (all files) 2024-11-20T11:20:25,009 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): dda426a558461ab734d5c9192624badc/A is initiating minor compaction (all files) 2024-11-20T11:20:25,009 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dda426a558461ab734d5c9192624badc/B in TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:25,009 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dda426a558461ab734d5c9192624badc/A in TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:25,009 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/2a528400f53d4f3dbe666f1b970fd51c, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/0045d1dda5c3487b9679261bdd926606, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/d2fd4619e3d849cabc2abc12ed1ad834] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp, totalSize=36.7 K 2024-11-20T11:20:25,009 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/50c726a5e3c44465b39e8c14873ae422, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/0c27465d31184da29f96fd2cda5a42c7, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/ff3e8e4dee1d4ac6a18ce52195bc61f7] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp, totalSize=36.7 K 2024-11-20T11:20:25,010 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a528400f53d4f3dbe666f1b970fd51c, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732101622287 2024-11-20T11:20:25,010 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 50c726a5e3c44465b39e8c14873ae422, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732101622287 2024-11-20T11:20:25,010 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 0045d1dda5c3487b9679261bdd926606, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732101623423 2024-11-20T11:20:25,010 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0c27465d31184da29f96fd2cda5a42c7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732101623423 2024-11-20T11:20:25,010 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting d2fd4619e3d849cabc2abc12ed1ad834, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1732101624547 2024-11-20T11:20:25,011 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff3e8e4dee1d4ac6a18ce52195bc61f7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1732101624547 2024-11-20T11:20:25,019 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): dda426a558461ab734d5c9192624badc#B#compaction#246 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:25,020 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dda426a558461ab734d5c9192624badc#A#compaction#247 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:25,020 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/a0d1b71ee28d478e8d8311a007cb3067 is 50, key is test_row_0/B:col10/1732101624552/Put/seqid=0 2024-11-20T11:20:25,020 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/ae7e31ff785241dd8d32d2e200cc6653 is 50, key is test_row_0/A:col10/1732101624552/Put/seqid=0 2024-11-20T11:20:25,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742113_1289 (size=13085) 2024-11-20T11:20:25,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742112_1288 (size=13085) 2024-11-20T11:20:25,029 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/ae7e31ff785241dd8d32d2e200cc6653 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/ae7e31ff785241dd8d32d2e200cc6653 2024-11-20T11:20:25,036 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dda426a558461ab734d5c9192624badc/A of dda426a558461ab734d5c9192624badc into ae7e31ff785241dd8d32d2e200cc6653(size=12.8 K), total size for store is 12.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:20:25,036 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:25,036 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., storeName=dda426a558461ab734d5c9192624badc/A, priority=13, startTime=1732101625008; duration=0sec 2024-11-20T11:20:25,036 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:20:25,036 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dda426a558461ab734d5c9192624badc:A 2024-11-20T11:20:25,036 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:25,037 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:25,037 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): dda426a558461ab734d5c9192624badc/C is initiating minor compaction (all files) 2024-11-20T11:20:25,037 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of dda426a558461ab734d5c9192624badc/C in TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 
2024-11-20T11:20:25,037 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/babdfa65a2d04adb89531e4af3aed47c, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/087bcfc5ec8a4715aed960eb4f226557, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/fcd02ef0c0464c10baa4297b56015880] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp, totalSize=36.7 K 2024-11-20T11:20:25,037 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting babdfa65a2d04adb89531e4af3aed47c, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732101622287 2024-11-20T11:20:25,038 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 087bcfc5ec8a4715aed960eb4f226557, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732101623423 2024-11-20T11:20:25,038 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting fcd02ef0c0464c10baa4297b56015880, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1732101624547 2024-11-20T11:20:25,046 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dda426a558461ab734d5c9192624badc#C#compaction#248 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:25,046 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/0204ff8b298f4587aa2c0b519a1497c2 is 50, key is test_row_0/C:col10/1732101624552/Put/seqid=0 2024-11-20T11:20:25,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742114_1290 (size=13085) 2024-11-20T11:20:25,055 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/0204ff8b298f4587aa2c0b519a1497c2 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/0204ff8b298f4587aa2c0b519a1497c2 2024-11-20T11:20:25,060 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dda426a558461ab734d5c9192624badc/C of dda426a558461ab734d5c9192624badc into 0204ff8b298f4587aa2c0b519a1497c2(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
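[Editor's note] By this point each of the stores A, B and C has accumulated three HFiles from the recent flushes, which is enough for the ExploringCompactionPolicy to select all of them for a minor compaction ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking"). The sketch below is a hedged illustration of the standard configuration knobs behind those numbers; the property names are stock HBase settings, but the values shown are defaults used for illustration, not necessarily what this test configures.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTriggerSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible store files before a minor compaction is considered;
    // the default of 3 matches the "3 store files ... 3 eligible" selections above.
    conf.setInt("hbase.hstore.compactionThreshold", 3);
    // Upper bound on how many files one minor compaction may pick up.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Store-file count at which further flushes are blocked until compaction catches up
    // (the "16 blocking" figure in the selection lines).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("minor compaction triggers at "
        + conf.getInt("hbase.hstore.compactionThreshold", 3) + " store files");
  }
}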
2024-11-20T11:20:25,060 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:25,060 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., storeName=dda426a558461ab734d5c9192624badc/C, priority=13, startTime=1732101625008; duration=0sec 2024-11-20T11:20:25,060 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:25,060 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dda426a558461ab734d5c9192624badc:C 2024-11-20T11:20:25,068 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:25,068 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-11-20T11:20:25,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:25,069 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2837): Flushing dda426a558461ab734d5c9192624badc 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T11:20:25,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=A 2024-11-20T11:20:25,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:25,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=B 2024-11-20T11:20:25,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:25,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=C 2024-11-20T11:20:25,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:25,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/cef6679250854cb9947d7e68a1f2e389 is 50, key is test_row_0/A:col10/1732101624570/Put/seqid=0 2024-11-20T11:20:25,077 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742115_1291 (size=12301) 2024-11-20T11:20:25,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on dda426a558461ab734d5c9192624badc 2024-11-20T11:20:25,188 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. as already flushing 2024-11-20T11:20:25,199 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:25,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101685197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:25,199 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:25,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101685198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:25,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-20T11:20:25,301 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:25,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101685300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:25,301 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:25,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101685300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:25,365 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:25,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52036 deadline: 1732101685364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:25,365 DEBUG [Thread-1027 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18222 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., hostname=ee8338ed7cc0,35185,1732101546666, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T11:20:25,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:25,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52012 deadline: 1732101685388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:25,390 DEBUG [Thread-1025 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18247 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., hostname=ee8338ed7cc0,35185,1732101546666, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T11:20:25,431 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/a0d1b71ee28d478e8d8311a007cb3067 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/a0d1b71ee28d478e8d8311a007cb3067 2024-11-20T11:20:25,436 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in dda426a558461ab734d5c9192624badc/B of dda426a558461ab734d5c9192624badc into a0d1b71ee28d478e8d8311a007cb3067(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:20:25,436 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:25,436 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., storeName=dda426a558461ab734d5c9192624badc/B, priority=13, startTime=1732101625008; duration=0sec 2024-11-20T11:20:25,436 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:25,436 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dda426a558461ab734d5c9192624badc:B 2024-11-20T11:20:25,444 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:25,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52016 deadline: 1732101685444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:25,445 DEBUG [Thread-1021 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18301 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc., hostname=ee8338ed7cc0,35185,1732101546666, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T11:20:25,478 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/cef6679250854cb9947d7e68a1f2e389 2024-11-20T11:20:25,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/9bdd830731b345a2843addc69c0a7610 is 50, key is test_row_0/B:col10/1732101624570/Put/seqid=0 2024-11-20T11:20:25,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742116_1292 (size=12301) 2024-11-20T11:20:25,504 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:25,504 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:25,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101685503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:25,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101685503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:25,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-20T11:20:25,807 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:25,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101685806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:25,808 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:25,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101685806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:25,892 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/9bdd830731b345a2843addc69c0a7610 2024-11-20T11:20:25,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/834fcd7671e7459e9cae3be51e8572c1 is 50, key is test_row_0/C:col10/1732101624570/Put/seqid=0 2024-11-20T11:20:25,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742117_1293 (size=12301) 2024-11-20T11:20:26,305 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/834fcd7671e7459e9cae3be51e8572c1 2024-11-20T11:20:26,309 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:26,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 269 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51964 deadline: 1732101686308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:26,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/cef6679250854cb9947d7e68a1f2e389 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/cef6679250854cb9947d7e68a1f2e389 2024-11-20T11:20:26,311 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:26,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51984 deadline: 1732101686311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:26,316 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/cef6679250854cb9947d7e68a1f2e389, entries=150, sequenceid=336, filesize=12.0 K 2024-11-20T11:20:26,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/9bdd830731b345a2843addc69c0a7610 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/9bdd830731b345a2843addc69c0a7610 2024-11-20T11:20:26,321 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/9bdd830731b345a2843addc69c0a7610, entries=150, sequenceid=336, filesize=12.0 K 2024-11-20T11:20:26,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/834fcd7671e7459e9cae3be51e8572c1 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/834fcd7671e7459e9cae3be51e8572c1 2024-11-20T11:20:26,325 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/834fcd7671e7459e9cae3be51e8572c1, entries=150, sequenceid=336, filesize=12.0 K 2024-11-20T11:20:26,326 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for dda426a558461ab734d5c9192624badc in 1258ms, sequenceid=336, 
compaction requested=false 2024-11-20T11:20:26,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2538): Flush status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:26,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:26,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=94 2024-11-20T11:20:26,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=94 2024-11-20T11:20:26,329 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-11-20T11:20:26,329 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7170 sec 2024-11-20T11:20:26,330 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees in 1.7210 sec 2024-11-20T11:20:26,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-20T11:20:26,714 INFO [Thread-1031 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 93 completed 2024-11-20T11:20:26,715 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:20:26,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=95, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=95, table=TestAcidGuarantees 2024-11-20T11:20:26,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-20T11:20:26,717 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=95, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=95, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:20:26,717 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=95, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=95, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:20:26,717 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:20:26,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-20T11:20:26,869 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:26,869 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote 
procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=96 2024-11-20T11:20:26,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:26,870 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegion(2837): Flushing dda426a558461ab734d5c9192624badc 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T11:20:26,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=A 2024-11-20T11:20:26,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:26,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=B 2024-11-20T11:20:26,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:26,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=C 2024-11-20T11:20:26,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:26,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/b9a1bd989dbe480893f58f829355456d is 50, key is test_row_0/A:col10/1732101625195/Put/seqid=0 2024-11-20T11:20:26,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742118_1294 (size=12301) 2024-11-20T11:20:27,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-20T11:20:27,144 DEBUG [Thread-1038 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2e4c79b8 to 127.0.0.1:62733 2024-11-20T11:20:27,144 DEBUG [Thread-1038 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:20:27,144 DEBUG [Thread-1040 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2d1403c3 to 127.0.0.1:62733 2024-11-20T11:20:27,144 DEBUG [Thread-1040 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:20:27,144 DEBUG [Thread-1032 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x78b04266 to 127.0.0.1:62733 2024-11-20T11:20:27,144 DEBUG [Thread-1032 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:20:27,145 DEBUG [Thread-1034 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x088aa519 to 127.0.0.1:62733 2024-11-20T11:20:27,145 DEBUG [Thread-1034 {}] ipc.AbstractRpcClient(514): Stopping rpc 
client 2024-11-20T11:20:27,145 DEBUG [Thread-1036 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5e998dd3 to 127.0.0.1:62733 2024-11-20T11:20:27,145 DEBUG [Thread-1036 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:20:27,293 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=348 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/b9a1bd989dbe480893f58f829355456d 2024-11-20T11:20:27,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/ff921a6e521c4b17a2b0e54f094ef4b0 is 50, key is test_row_0/B:col10/1732101625195/Put/seqid=0 2024-11-20T11:20:27,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742119_1295 (size=12301) 2024-11-20T11:20:27,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on dda426a558461ab734d5c9192624badc 2024-11-20T11:20:27,313 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. as already flushing 2024-11-20T11:20:27,313 DEBUG [Thread-1023 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x64ee0130 to 127.0.0.1:62733 2024-11-20T11:20:27,313 DEBUG [Thread-1023 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:20:27,318 DEBUG [Thread-1029 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x03a703d2 to 127.0.0.1:62733 2024-11-20T11:20:27,318 DEBUG [Thread-1029 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:20:27,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-20T11:20:27,703 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=348 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/ff921a6e521c4b17a2b0e54f094ef4b0 2024-11-20T11:20:27,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/b9b2d07518524707b213a7de7a131ef6 is 50, key is test_row_0/C:col10/1732101625195/Put/seqid=0 2024-11-20T11:20:27,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742120_1296 (size=12301) 2024-11-20T11:20:27,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-20T11:20:28,114 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=348 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/b9b2d07518524707b213a7de7a131ef6 2024-11-20T11:20:28,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/b9a1bd989dbe480893f58f829355456d as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/b9a1bd989dbe480893f58f829355456d 2024-11-20T11:20:28,122 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/b9a1bd989dbe480893f58f829355456d, entries=150, sequenceid=348, filesize=12.0 K 2024-11-20T11:20:28,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/ff921a6e521c4b17a2b0e54f094ef4b0 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/ff921a6e521c4b17a2b0e54f094ef4b0 2024-11-20T11:20:28,126 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/ff921a6e521c4b17a2b0e54f094ef4b0, entries=150, sequenceid=348, filesize=12.0 K 2024-11-20T11:20:28,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/b9b2d07518524707b213a7de7a131ef6 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/b9b2d07518524707b213a7de7a131ef6 2024-11-20T11:20:28,129 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/b9b2d07518524707b213a7de7a131ef6, entries=150, sequenceid=348, filesize=12.0 K 2024-11-20T11:20:28,130 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=13.42 KB/13740 for dda426a558461ab734d5c9192624badc in 1260ms, sequenceid=348, compaction requested=true 2024-11-20T11:20:28,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 
{event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegion(2538): Flush status journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:28,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:28,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=96 2024-11-20T11:20:28,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=96 2024-11-20T11:20:28,132 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-11-20T11:20:28,132 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4140 sec 2024-11-20T11:20:28,133 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=95, table=TestAcidGuarantees in 1.4170 sec 2024-11-20T11:20:28,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-20T11:20:28,821 INFO [Thread-1031 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 95 completed 2024-11-20T11:20:35,056 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T11:20:35,411 DEBUG [Thread-1027 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x07e55eb7 to 127.0.0.1:62733 2024-11-20T11:20:35,411 DEBUG [Thread-1027 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:20:35,446 DEBUG [Thread-1025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x683b64c3 to 127.0.0.1:62733 2024-11-20T11:20:35,446 DEBUG [Thread-1025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:20:35,465 DEBUG [Thread-1021 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x44645c55 to 127.0.0.1:62733 2024-11-20T11:20:35,465 DEBUG [Thread-1021 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:20:35,465 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-20T11:20:35,465 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 1 2024-11-20T11:20:35,465 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 144 2024-11-20T11:20:35,465 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 1 2024-11-20T11:20:35,465 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 1 2024-11-20T11:20:35,465 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 127 2024-11-20T11:20:35,465 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T11:20:35,465 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8571 2024-11-20T11:20:35,465 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8502 2024-11-20T11:20:35,465 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8557 2024-11-20T11:20:35,465 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8561 2024-11-20T11:20:35,465 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8517 2024-11-20T11:20:35,465 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T11:20:35,465 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T11:20:35,465 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x58341641 to 127.0.0.1:62733 2024-11-20T11:20:35,465 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:20:35,466 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T11:20:35,466 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T11:20:35,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=97, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T11:20:35,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-20T11:20:35,469 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732101635468"}]},"ts":"1732101635468"} 2024-11-20T11:20:35,469 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T11:20:35,472 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T11:20:35,472 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=98, ppid=97, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T11:20:35,473 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=dda426a558461ab734d5c9192624badc, UNASSIGN}] 2024-11-20T11:20:35,474 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=dda426a558461ab734d5c9192624badc, UNASSIGN 2024-11-20T11:20:35,474 INFO [PEWorker-4 {}] 
assignment.RegionStateStore(202): pid=99 updating hbase:meta row=dda426a558461ab734d5c9192624badc, regionState=CLOSING, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:35,475 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T11:20:35,475 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE; CloseRegionProcedure dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666}] 2024-11-20T11:20:35,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-20T11:20:35,626 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:35,626 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] handler.UnassignRegionHandler(124): Close dda426a558461ab734d5c9192624badc 2024-11-20T11:20:35,626 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T11:20:35,626 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1681): Closing dda426a558461ab734d5c9192624badc, disabling compactions & flushes 2024-11-20T11:20:35,627 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:35,627 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 2024-11-20T11:20:35,627 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. after waiting 0 ms 2024-11-20T11:20:35,627 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 
2024-11-20T11:20:35,627 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(2837): Flushing dda426a558461ab734d5c9192624badc 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T11:20:35,627 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=A 2024-11-20T11:20:35,627 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:35,627 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=B 2024-11-20T11:20:35,627 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:35,627 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.CompactingMemStore(205): FLUSHING TO DISK dda426a558461ab734d5c9192624badc, store=C 2024-11-20T11:20:35,627 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:35,631 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/573aa3947d6b4eeb9a096b5128d33115 is 50, key is test_row_0/A:col10/1732101635445/Put/seqid=0 2024-11-20T11:20:35,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742121_1297 (size=12301) 2024-11-20T11:20:35,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-20T11:20:36,035 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/573aa3947d6b4eeb9a096b5128d33115 2024-11-20T11:20:36,041 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/35ca5775a9ab41c0bf180255aaf29d5f is 50, key is test_row_0/B:col10/1732101635445/Put/seqid=0 2024-11-20T11:20:36,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742122_1298 (size=12301) 2024-11-20T11:20:36,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-20T11:20:36,445 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 
{event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/35ca5775a9ab41c0bf180255aaf29d5f 2024-11-20T11:20:36,451 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/ac97387ff6674568bf56cb23d87ac0c5 is 50, key is test_row_0/C:col10/1732101635445/Put/seqid=0 2024-11-20T11:20:36,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742123_1299 (size=12301) 2024-11-20T11:20:36,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-20T11:20:36,855 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/ac97387ff6674568bf56cb23d87ac0c5 2024-11-20T11:20:36,859 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/A/573aa3947d6b4eeb9a096b5128d33115 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/573aa3947d6b4eeb9a096b5128d33115 2024-11-20T11:20:36,862 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/573aa3947d6b4eeb9a096b5128d33115, entries=150, sequenceid=356, filesize=12.0 K 2024-11-20T11:20:36,863 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/B/35ca5775a9ab41c0bf180255aaf29d5f as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/35ca5775a9ab41c0bf180255aaf29d5f 2024-11-20T11:20:36,866 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/35ca5775a9ab41c0bf180255aaf29d5f, entries=150, sequenceid=356, filesize=12.0 K 2024-11-20T11:20:36,866 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/.tmp/C/ac97387ff6674568bf56cb23d87ac0c5 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/ac97387ff6674568bf56cb23d87ac0c5 2024-11-20T11:20:36,869 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/ac97387ff6674568bf56cb23d87ac0c5, entries=150, sequenceid=356, filesize=12.0 K 2024-11-20T11:20:36,870 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for dda426a558461ab734d5c9192624badc in 1243ms, sequenceid=356, compaction requested=true 2024-11-20T11:20:36,870 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/d28025a2c19141b2b21676cbc4b4fa26, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/c42c8f2e41274a4a94e7e0c9625790af, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/e8a18c57627b41a2bff54933fbcf6256, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/ec8d6cf14b414410bfd4a4af18e9c265, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/379bd1722e914f21bc26ad4f07834394, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/ae53f675783d4cdf884465df4f2c7326, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/9e653b16aa964165bde8f0de70abf85d, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/cb2450f3272b4eb9a4c4148fe801d1a5, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/b352297cb2724dda8c3afea87e727122, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/3efe9a51b7e64d798ed0d9fcf5da00f6, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/e0a507ebb7b3431ca64894ddc230bd43, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/c2835f082ec6474a9fcb0fe0b5a803db, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/52bbc8fd6e4747d9ba7508f95915a89b, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/125dbb58d4d14c64a38a8c2ad96329e2, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/7659841c1f804a0b9ddc816c912560c9, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/19a4913d54e24330ac82fbd6483112bf, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/3d7ddea78e9746359c63a9c1bf981d4b, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/c2ff44303a094818be5b7aeffe883749, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/c936efe0993c4bffae5d648e1841150f, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/df076787b998443bb36b54718ac17694, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/50c726a5e3c44465b39e8c14873ae422, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/0c27465d31184da29f96fd2cda5a42c7, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/ff3e8e4dee1d4ac6a18ce52195bc61f7] to archive 2024-11-20T11:20:36,871 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T11:20:36,873 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/d28025a2c19141b2b21676cbc4b4fa26 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/d28025a2c19141b2b21676cbc4b4fa26 2024-11-20T11:20:36,874 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/c42c8f2e41274a4a94e7e0c9625790af to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/c42c8f2e41274a4a94e7e0c9625790af 2024-11-20T11:20:36,874 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/e8a18c57627b41a2bff54933fbcf6256 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/e8a18c57627b41a2bff54933fbcf6256 2024-11-20T11:20:36,875 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/ec8d6cf14b414410bfd4a4af18e9c265 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/ec8d6cf14b414410bfd4a4af18e9c265 2024-11-20T11:20:36,876 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/379bd1722e914f21bc26ad4f07834394 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/379bd1722e914f21bc26ad4f07834394 2024-11-20T11:20:36,877 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/ae53f675783d4cdf884465df4f2c7326 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/ae53f675783d4cdf884465df4f2c7326 2024-11-20T11:20:36,878 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/9e653b16aa964165bde8f0de70abf85d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/9e653b16aa964165bde8f0de70abf85d 2024-11-20T11:20:36,879 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/cb2450f3272b4eb9a4c4148fe801d1a5 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/cb2450f3272b4eb9a4c4148fe801d1a5 2024-11-20T11:20:36,879 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/b352297cb2724dda8c3afea87e727122 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/b352297cb2724dda8c3afea87e727122 2024-11-20T11:20:36,880 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/3efe9a51b7e64d798ed0d9fcf5da00f6 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/3efe9a51b7e64d798ed0d9fcf5da00f6 2024-11-20T11:20:36,881 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/e0a507ebb7b3431ca64894ddc230bd43 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/e0a507ebb7b3431ca64894ddc230bd43 2024-11-20T11:20:36,882 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/c2835f082ec6474a9fcb0fe0b5a803db to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/c2835f082ec6474a9fcb0fe0b5a803db 2024-11-20T11:20:36,883 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/52bbc8fd6e4747d9ba7508f95915a89b to 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/52bbc8fd6e4747d9ba7508f95915a89b 2024-11-20T11:20:36,884 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/125dbb58d4d14c64a38a8c2ad96329e2 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/125dbb58d4d14c64a38a8c2ad96329e2 2024-11-20T11:20:36,884 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/7659841c1f804a0b9ddc816c912560c9 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/7659841c1f804a0b9ddc816c912560c9 2024-11-20T11:20:36,885 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/19a4913d54e24330ac82fbd6483112bf to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/19a4913d54e24330ac82fbd6483112bf 2024-11-20T11:20:36,886 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/3d7ddea78e9746359c63a9c1bf981d4b to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/3d7ddea78e9746359c63a9c1bf981d4b 2024-11-20T11:20:36,887 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/c2ff44303a094818be5b7aeffe883749 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/c2ff44303a094818be5b7aeffe883749 2024-11-20T11:20:36,888 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/c936efe0993c4bffae5d648e1841150f to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/c936efe0993c4bffae5d648e1841150f 2024-11-20T11:20:36,888 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/df076787b998443bb36b54718ac17694 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/df076787b998443bb36b54718ac17694 2024-11-20T11:20:36,889 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/50c726a5e3c44465b39e8c14873ae422 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/50c726a5e3c44465b39e8c14873ae422 2024-11-20T11:20:36,890 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/0c27465d31184da29f96fd2cda5a42c7 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/0c27465d31184da29f96fd2cda5a42c7 2024-11-20T11:20:36,891 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/ff3e8e4dee1d4ac6a18ce52195bc61f7 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/ff3e8e4dee1d4ac6a18ce52195bc61f7 2024-11-20T11:20:36,892 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/9d3af5e59ffd410aa5532c87831610a3, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/978fbee1d0ab4745a9190f26f96a3f2f, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/a165d42d366b487b9dbde2c100d8b0a8, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/afeaa8d071e74eee92a54250d632924b, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/15a98fbc95d24b259ffb1dff34ad7b53, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/2883d683efcd490ebd2093ede81847d6, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/e164f8085a864cabaaf61b4518cab1b9, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/261f2e7be5d44592925a4547183d9c4b, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/fd1c7d0e58af45788548b34111989416, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/6ebfd336f3de40eaa1ea0725da374f0e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/87899576ba224943b691c92209ff6016, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/14cd88f47ebc42468d51fbe5b8496616, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/b18c13624aaf479cb95d36be4c9def52, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/ad98f0fba58d45e0b7dd93f6e1d7d63b, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/b06f442780c14361860afa717c268628, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/7846313f6752440cb775e26552a031b7, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/eab7fb87b4db4544a83c44244184547a, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/21559dfb507649508673238bed7b7a07, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/09ec76056c594a21b02298ba46cb6ac1, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/2a528400f53d4f3dbe666f1b970fd51c, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/06fb3a40031b4b3aaa80b537806e1738, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/0045d1dda5c3487b9679261bdd926606, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/d2fd4619e3d849cabc2abc12ed1ad834] to archive 2024-11-20T11:20:36,893 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T11:20:36,894 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/9d3af5e59ffd410aa5532c87831610a3 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/9d3af5e59ffd410aa5532c87831610a3 2024-11-20T11:20:36,895 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/978fbee1d0ab4745a9190f26f96a3f2f to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/978fbee1d0ab4745a9190f26f96a3f2f 2024-11-20T11:20:36,896 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/a165d42d366b487b9dbde2c100d8b0a8 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/a165d42d366b487b9dbde2c100d8b0a8 2024-11-20T11:20:36,897 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/afeaa8d071e74eee92a54250d632924b to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/afeaa8d071e74eee92a54250d632924b 2024-11-20T11:20:36,897 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/15a98fbc95d24b259ffb1dff34ad7b53 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/15a98fbc95d24b259ffb1dff34ad7b53 2024-11-20T11:20:36,898 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/2883d683efcd490ebd2093ede81847d6 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/2883d683efcd490ebd2093ede81847d6 2024-11-20T11:20:36,899 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/e164f8085a864cabaaf61b4518cab1b9 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/e164f8085a864cabaaf61b4518cab1b9 2024-11-20T11:20:36,900 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/261f2e7be5d44592925a4547183d9c4b to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/261f2e7be5d44592925a4547183d9c4b 2024-11-20T11:20:36,901 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/fd1c7d0e58af45788548b34111989416 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/fd1c7d0e58af45788548b34111989416 2024-11-20T11:20:36,902 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/6ebfd336f3de40eaa1ea0725da374f0e to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/6ebfd336f3de40eaa1ea0725da374f0e 2024-11-20T11:20:36,902 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/87899576ba224943b691c92209ff6016 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/87899576ba224943b691c92209ff6016 2024-11-20T11:20:36,903 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/14cd88f47ebc42468d51fbe5b8496616 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/14cd88f47ebc42468d51fbe5b8496616 2024-11-20T11:20:36,904 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/b18c13624aaf479cb95d36be4c9def52 to 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/b18c13624aaf479cb95d36be4c9def52 2024-11-20T11:20:36,905 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/ad98f0fba58d45e0b7dd93f6e1d7d63b to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/ad98f0fba58d45e0b7dd93f6e1d7d63b 2024-11-20T11:20:36,906 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/b06f442780c14361860afa717c268628 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/b06f442780c14361860afa717c268628 2024-11-20T11:20:36,907 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/7846313f6752440cb775e26552a031b7 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/7846313f6752440cb775e26552a031b7 2024-11-20T11:20:36,908 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/eab7fb87b4db4544a83c44244184547a to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/eab7fb87b4db4544a83c44244184547a 2024-11-20T11:20:36,909 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/21559dfb507649508673238bed7b7a07 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/21559dfb507649508673238bed7b7a07 2024-11-20T11:20:36,910 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/09ec76056c594a21b02298ba46cb6ac1 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/09ec76056c594a21b02298ba46cb6ac1 2024-11-20T11:20:36,911 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/2a528400f53d4f3dbe666f1b970fd51c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/2a528400f53d4f3dbe666f1b970fd51c 2024-11-20T11:20:36,911 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/06fb3a40031b4b3aaa80b537806e1738 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/06fb3a40031b4b3aaa80b537806e1738 2024-11-20T11:20:36,912 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/0045d1dda5c3487b9679261bdd926606 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/0045d1dda5c3487b9679261bdd926606 2024-11-20T11:20:36,913 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/d2fd4619e3d849cabc2abc12ed1ad834 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/d2fd4619e3d849cabc2abc12ed1ad834 2024-11-20T11:20:36,914 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/fb7b2c1f5e8d4a86a88c57fefe408742, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/7a5687ed3d344a04b035e24dc47c0b42, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/7a13cbfd0ae247748ba1c71b24e8ccc6, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/71bf05eb929e478ea9397eff7407b7d9, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/8573b60d6ee14157afd0841698befcf1, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/2e799bbca71d4401ab7f67a48c48e798, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/323f5812d6234586aef4d676acdc7eb0, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/36925946384d45b9999b183fb573f358, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/e22551da4a7c467693bf45832f7feb57, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/45df400150b54ce6a220d470f02b42be, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/08bd96ef799642b59707353774191f68, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/a3c7874996864770838585e628053e37, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/31ac0059b24a4bbabf67874892fb8db7, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/2acd6ac382a34336b52c6724f3103433, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/2eba759a38764b3a8aadc78561791233, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/479654a73b114e8e9c2135fe0bced56f, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/479bdef0c5904b1cb53b2bfced24beb0, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/27ec6ae3f6e443be86c1b2259d8e3c1f, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/b4d82bd37d36408fa300855194d7c0fd, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/babdfa65a2d04adb89531e4af3aed47c, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/7e16e50135784fb5a2fbc1dcd5f64c7e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/087bcfc5ec8a4715aed960eb4f226557, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/fcd02ef0c0464c10baa4297b56015880] to archive 2024-11-20T11:20:36,915 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T11:20:36,916 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/fb7b2c1f5e8d4a86a88c57fefe408742 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/fb7b2c1f5e8d4a86a88c57fefe408742 2024-11-20T11:20:36,917 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/7a5687ed3d344a04b035e24dc47c0b42 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/7a5687ed3d344a04b035e24dc47c0b42 2024-11-20T11:20:36,918 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/7a13cbfd0ae247748ba1c71b24e8ccc6 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/7a13cbfd0ae247748ba1c71b24e8ccc6 2024-11-20T11:20:36,919 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/71bf05eb929e478ea9397eff7407b7d9 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/71bf05eb929e478ea9397eff7407b7d9 2024-11-20T11:20:36,919 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/8573b60d6ee14157afd0841698befcf1 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/8573b60d6ee14157afd0841698befcf1 2024-11-20T11:20:36,920 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/2e799bbca71d4401ab7f67a48c48e798 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/2e799bbca71d4401ab7f67a48c48e798 2024-11-20T11:20:36,921 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/323f5812d6234586aef4d676acdc7eb0 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/323f5812d6234586aef4d676acdc7eb0 2024-11-20T11:20:36,922 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/36925946384d45b9999b183fb573f358 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/36925946384d45b9999b183fb573f358 2024-11-20T11:20:36,923 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/e22551da4a7c467693bf45832f7feb57 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/e22551da4a7c467693bf45832f7feb57 2024-11-20T11:20:36,924 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/45df400150b54ce6a220d470f02b42be to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/45df400150b54ce6a220d470f02b42be 2024-11-20T11:20:36,925 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/08bd96ef799642b59707353774191f68 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/08bd96ef799642b59707353774191f68 2024-11-20T11:20:36,926 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/a3c7874996864770838585e628053e37 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/a3c7874996864770838585e628053e37 2024-11-20T11:20:36,927 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/31ac0059b24a4bbabf67874892fb8db7 to 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/31ac0059b24a4bbabf67874892fb8db7 2024-11-20T11:20:36,928 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/2acd6ac382a34336b52c6724f3103433 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/2acd6ac382a34336b52c6724f3103433 2024-11-20T11:20:36,929 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/2eba759a38764b3a8aadc78561791233 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/2eba759a38764b3a8aadc78561791233 2024-11-20T11:20:36,930 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/479654a73b114e8e9c2135fe0bced56f to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/479654a73b114e8e9c2135fe0bced56f 2024-11-20T11:20:36,931 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/479bdef0c5904b1cb53b2bfced24beb0 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/479bdef0c5904b1cb53b2bfced24beb0 2024-11-20T11:20:36,932 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/27ec6ae3f6e443be86c1b2259d8e3c1f to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/27ec6ae3f6e443be86c1b2259d8e3c1f 2024-11-20T11:20:36,933 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/b4d82bd37d36408fa300855194d7c0fd to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/b4d82bd37d36408fa300855194d7c0fd 2024-11-20T11:20:36,934 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/babdfa65a2d04adb89531e4af3aed47c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/babdfa65a2d04adb89531e4af3aed47c 2024-11-20T11:20:36,935 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/7e16e50135784fb5a2fbc1dcd5f64c7e to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/7e16e50135784fb5a2fbc1dcd5f64c7e 2024-11-20T11:20:36,936 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/087bcfc5ec8a4715aed960eb4f226557 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/087bcfc5ec8a4715aed960eb4f226557 2024-11-20T11:20:36,937 DEBUG [StoreCloser-TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/fcd02ef0c0464c10baa4297b56015880 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/fcd02ef0c0464c10baa4297b56015880 2024-11-20T11:20:36,941 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/recovered.edits/359.seqid, newMaxSeqId=359, maxSeqId=1 2024-11-20T11:20:36,941 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc. 
2024-11-20T11:20:36,941 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1635): Region close journal for dda426a558461ab734d5c9192624badc: 2024-11-20T11:20:36,943 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] handler.UnassignRegionHandler(170): Closed dda426a558461ab734d5c9192624badc 2024-11-20T11:20:36,943 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=dda426a558461ab734d5c9192624badc, regionState=CLOSED 2024-11-20T11:20:36,945 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-11-20T11:20:36,945 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; CloseRegionProcedure dda426a558461ab734d5c9192624badc, server=ee8338ed7cc0,35185,1732101546666 in 1.4690 sec 2024-11-20T11:20:36,946 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=98 2024-11-20T11:20:36,946 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=98, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=dda426a558461ab734d5c9192624badc, UNASSIGN in 1.4720 sec 2024-11-20T11:20:36,947 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=98, resume processing ppid=97 2024-11-20T11:20:36,947 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, ppid=97, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4740 sec 2024-11-20T11:20:36,948 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732101636948"}]},"ts":"1732101636948"} 2024-11-20T11:20:36,949 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T11:20:36,951 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T11:20:36,952 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.4860 sec 2024-11-20T11:20:37,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-20T11:20:37,572 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 97 completed 2024-11-20T11:20:37,572 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T11:20:37,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=101, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:20:37,574 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=101, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:20:37,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=101 2024-11-20T11:20:37,574 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for 
pid=101, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:20:37,576 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc 2024-11-20T11:20:37,578 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A, FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B, FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C, FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/recovered.edits] 2024-11-20T11:20:37,581 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/573aa3947d6b4eeb9a096b5128d33115 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/573aa3947d6b4eeb9a096b5128d33115 2024-11-20T11:20:37,582 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/ae7e31ff785241dd8d32d2e200cc6653 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/ae7e31ff785241dd8d32d2e200cc6653 2024-11-20T11:20:37,583 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/b9a1bd989dbe480893f58f829355456d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/b9a1bd989dbe480893f58f829355456d 2024-11-20T11:20:37,584 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/cef6679250854cb9947d7e68a1f2e389 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/cef6679250854cb9947d7e68a1f2e389 2024-11-20T11:20:37,586 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/35ca5775a9ab41c0bf180255aaf29d5f to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/35ca5775a9ab41c0bf180255aaf29d5f 2024-11-20T11:20:37,587 
DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/9bdd830731b345a2843addc69c0a7610 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/9bdd830731b345a2843addc69c0a7610 2024-11-20T11:20:37,588 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/a0d1b71ee28d478e8d8311a007cb3067 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/a0d1b71ee28d478e8d8311a007cb3067 2024-11-20T11:20:37,589 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/ff921a6e521c4b17a2b0e54f094ef4b0 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/B/ff921a6e521c4b17a2b0e54f094ef4b0 2024-11-20T11:20:37,591 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/0204ff8b298f4587aa2c0b519a1497c2 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/0204ff8b298f4587aa2c0b519a1497c2 2024-11-20T11:20:37,592 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/834fcd7671e7459e9cae3be51e8572c1 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/834fcd7671e7459e9cae3be51e8572c1 2024-11-20T11:20:37,593 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/ac97387ff6674568bf56cb23d87ac0c5 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/ac97387ff6674568bf56cb23d87ac0c5 2024-11-20T11:20:37,593 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/b9b2d07518524707b213a7de7a131ef6 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/C/b9b2d07518524707b213a7de7a131ef6 2024-11-20T11:20:37,596 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/recovered.edits/359.seqid to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/recovered.edits/359.seqid 2024-11-20T11:20:37,596 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc 2024-11-20T11:20:37,596 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T11:20:37,598 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=101, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:20:37,602 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T11:20:37,604 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T11:20:37,605 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=101, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:20:37,605 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T11:20:37,605 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732101637605"}]},"ts":"9223372036854775807"} 2024-11-20T11:20:37,606 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T11:20:37,606 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => dda426a558461ab734d5c9192624badc, NAME => 'TestAcidGuarantees,,1732101604979.dda426a558461ab734d5c9192624badc.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T11:20:37,606 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
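The HFileArchiver entries above move each store file out of the table's data directory into the matching path under archive/ before the region directory itself is deleted. A small standalone sketch of that path mapping, illustrative only (HBase derives it internally, roughly along the lines of HFileArchiveUtil); the root directory below is the one from this test run:

    import org.apache.hadoop.fs.Path;

    // Reproduces the data/... -> archive/data/... mapping visible in the entries above.
    // Assumes storeFile lives under rootDir; this is not the actual HBase implementation.
    public final class ArchivePathSketch {
      static Path toArchivePath(Path rootDir, Path storeFile) {
        String rel = storeFile.toString().substring(rootDir.toString().length() + 1); // "data/default/..."
        return new Path(new Path(rootDir, "archive"), rel);                           // "<root>/archive/data/default/..."
      }

      public static void main(String[] args) {
        Path root = new Path("hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830");
        Path hfile = new Path(root,
            "data/default/TestAcidGuarantees/dda426a558461ab734d5c9192624badc/A/573aa3947d6b4eeb9a096b5128d33115");
        System.out.println(toArchivePath(root, hfile)); // ends in .../archive/data/default/TestAcidGuarantees/...
      }
    }
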
2024-11-20T11:20:37,606 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732101637606"}]},"ts":"9223372036854775807"} 2024-11-20T11:20:37,607 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T11:20:37,609 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=101, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:20:37,610 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 37 msec 2024-11-20T11:20:37,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=101 2024-11-20T11:20:37,675 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 101 completed 2024-11-20T11:20:37,684 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=238 (was 242), OpenFileDescriptor=451 (was 462), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=320 (was 327), ProcessCount=11 (was 11), AvailableMemoryMB=5845 (was 5871) 2024-11-20T11:20:37,692 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=238, OpenFileDescriptor=451, MaxFileDescriptor=1048576, SystemLoadAverage=320, ProcessCount=11, AvailableMemoryMB=5845 2024-11-20T11:20:37,693 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
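The "Operation: DISABLE ... completed" and "Operation: DELETE ... completed" lines above are the client side of the DisableTableProcedure/DeleteTableProcedure pair (pid=97 and pid=101). A minimal sketch of the same calls through the public Admin API, assuming an ordinary Connection rather than the test's mini-cluster wiring:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Illustrative client-side equivalent of the DISABLE/DELETE sequence above.
    public final class DropTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          if (admin.tableExists(tn)) {
            if (admin.isTableEnabled(tn)) {
              admin.disableTable(tn);   // master runs DisableTableProcedure (pid=97 above)
            }
            admin.deleteTable(tn);      // master runs DeleteTableProcedure (pid=101 above)
          }
        }
      }
    }
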
2024-11-20T11:20:37,694 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T11:20:37,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=102, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T11:20:37,695 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=102, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T11:20:37,695 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:20:37,695 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 102 2024-11-20T11:20:37,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T11:20:37,696 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=102, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T11:20:37,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742124_1300 (size=960) 2024-11-20T11:20:37,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T11:20:37,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T11:20:38,103 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830 2024-11-20T11:20:38,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742125_1301 (size=53) 2024-11-20T11:20:38,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T11:20:38,508 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T11:20:38,509 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 0470447603cf0ef7bd1ff47e79d9530d, disabling compactions & flushes 2024-11-20T11:20:38,509 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:38,509 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:38,509 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. after waiting 0 ms 2024-11-20T11:20:38,509 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:38,509 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
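The create request above declares three families A, B and C with VERSIONS=1, sets 'hbase.hregion.compacting.memstore.type' to BASIC as a table attribute, and uses the tiny 131072-byte memstore flush size that triggers the TableDescriptorChecker warning. A rough descriptor-building sketch of that shape; whether the flush size is set on the descriptor or in the cluster configuration is an assumption here:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch of a descriptor with the shape logged above; not the test's actual builder code.
    public final class CreateTestTableSketch {
      static void createTestTable(Admin admin) throws IOException {
        TableDescriptorBuilder tdb = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setValue("hbase.hregion.compacting.memstore.type", "BASIC") // TABLE_ATTRIBUTES METADATA above
            .setMemStoreFlushSize(131072L);                              // 128 KB; triggers the checker warning
        for (String cf : new String[] {"A", "B", "C"}) {
          tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(cf))
              .setMaxVersions(1)                                         // VERSIONS => '1'
              .build());
        }
        admin.createTable(tdb.build());                                  // CreateTableProcedure pid=102 above
      }
    }
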
2024-11-20T11:20:38,509 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:38,510 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=102, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T11:20:38,510 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732101638510"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732101638510"}]},"ts":"1732101638510"} 2024-11-20T11:20:38,511 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T11:20:38,511 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=102, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T11:20:38,512 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732101638511"}]},"ts":"1732101638511"} 2024-11-20T11:20:38,512 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T11:20:38,516 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0470447603cf0ef7bd1ff47e79d9530d, ASSIGN}] 2024-11-20T11:20:38,516 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0470447603cf0ef7bd1ff47e79d9530d, ASSIGN 2024-11-20T11:20:38,517 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=0470447603cf0ef7bd1ff47e79d9530d, ASSIGN; state=OFFLINE, location=ee8338ed7cc0,35185,1732101546666; forceNewPlan=false, retain=false 2024-11-20T11:20:38,667 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=0470447603cf0ef7bd1ff47e79d9530d, regionState=OPENING, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:38,668 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=104, ppid=103, state=RUNNABLE; OpenRegionProcedure 0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666}] 2024-11-20T11:20:38,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T11:20:38,820 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:38,823 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=104}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
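The MetaTableAccessor Put entries above write the 'info:regioninfo' and 'info:state' columns for the new region (plus the per-table 'table:state' column) into hbase:meta. For illustration, the same cells can be read back through the ordinary client API; the row key below is the one logged for this region:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Read-back sketch for the info:state cell that RegionStateStore updates above.
    public final class MetaStateReadSketch {
      static byte[] readRegionState(Connection conn) throws IOException {
        try (Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
          Get get = new Get(Bytes.toBytes(
              "TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d."));
          get.addFamily(Bytes.toBytes("info"));
          Result result = meta.get(get);
          return result.getValue(Bytes.toBytes("info"), Bytes.toBytes("state")); // e.g. OPENING / OPEN
        }
      }
    }
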
2024-11-20T11:20:38,823 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=104}] regionserver.HRegion(7285): Opening region: {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} 2024-11-20T11:20:38,823 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=104}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:38,823 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=104}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T11:20:38,823 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=104}] regionserver.HRegion(7327): checking encryption for 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:38,823 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=104}] regionserver.HRegion(7330): checking classloading for 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:38,824 INFO [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:38,825 INFO [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T11:20:38,826 INFO [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0470447603cf0ef7bd1ff47e79d9530d columnFamilyName A 2024-11-20T11:20:38,826 DEBUG [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:20:38,826 INFO [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] regionserver.HStore(327): Store=0470447603cf0ef7bd1ff47e79d9530d/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:20:38,826 INFO [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:38,827 INFO [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T11:20:38,827 INFO [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0470447603cf0ef7bd1ff47e79d9530d columnFamilyName B 2024-11-20T11:20:38,827 DEBUG [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:20:38,828 INFO [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] regionserver.HStore(327): Store=0470447603cf0ef7bd1ff47e79d9530d/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:20:38,828 INFO [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:38,829 INFO [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T11:20:38,829 INFO [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0470447603cf0ef7bd1ff47e79d9530d columnFamilyName C 2024-11-20T11:20:38,829 DEBUG [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:20:38,829 INFO [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] regionserver.HStore(327): Store=0470447603cf0ef7bd1ff47e79d9530d/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:20:38,829 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=104}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:38,830 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=104}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:38,830 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=104}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:38,831 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=104}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T11:20:38,832 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=104}] regionserver.HRegion(1085): writing seq id for 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:38,834 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=104}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T11:20:38,834 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=104}] regionserver.HRegion(1102): Opened 0470447603cf0ef7bd1ff47e79d9530d; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74926990, jitterRate=0.1164991557598114}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T11:20:38,835 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=104}] regionserver.HRegion(1001): Region open journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:38,835 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=104}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d., pid=104, masterSystemTime=1732101638820 2024-11-20T11:20:38,837 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=104}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:38,837 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=104}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
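The "ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74926990, jitterRate=0.1164991557598114}" figure in the open line above is consistent with a base region max file size of 64 MB (presumably from the test's cluster configuration, inferred from the numbers rather than read from any config) scaled by (1 + jitterRate); the same relation holds for the 74012188 value logged later when the region is reopened. A two-line arithmetic check:

    // Arithmetic check only: 64 MB as the base is an inference, not read from the configuration.
    public final class SplitSizeJitterCheck {
      public static void main(String[] args) {
        long base = 64L * 1024 * 1024;                                   // 67,108,864 bytes
        System.out.println((long) (base * (1 + 0.1164991557598114)));   // ~74926990 (first open, up to rounding)
        System.out.println((long) (base * (1 + 0.10286754369735718)));  // ~74012188 (reopen later in the log)
      }
    }
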
2024-11-20T11:20:38,837 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=0470447603cf0ef7bd1ff47e79d9530d, regionState=OPEN, openSeqNum=2, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:38,839 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=104, resume processing ppid=103 2024-11-20T11:20:38,839 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, ppid=103, state=SUCCESS; OpenRegionProcedure 0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 in 170 msec 2024-11-20T11:20:38,840 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-11-20T11:20:38,840 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=0470447603cf0ef7bd1ff47e79d9530d, ASSIGN in 324 msec 2024-11-20T11:20:38,841 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=102, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T11:20:38,841 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732101638841"}]},"ts":"1732101638841"} 2024-11-20T11:20:38,841 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T11:20:38,844 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=102, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T11:20:38,845 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1500 sec 2024-11-20T11:20:39,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T11:20:39,800 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 102 completed 2024-11-20T11:20:39,801 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5fe71801 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@bf5e2f0 2024-11-20T11:20:39,804 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b82ba2a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:20:39,806 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:20:39,807 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50024, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:20:39,808 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T11:20:39,809 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32836, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T11:20:39,810 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T11:20:39,810 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T11:20:39,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=105, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-20T11:20:39,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742126_1302 (size=996) 2024-11-20T11:20:40,221 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-20T11:20:40,221 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-20T11:20:40,223 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=106, ppid=105, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T11:20:40,225 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0470447603cf0ef7bd1ff47e79d9530d, REOPEN/MOVE}] 2024-11-20T11:20:40,226 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=107, ppid=106, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0470447603cf0ef7bd1ff47e79d9530d, REOPEN/MOVE 2024-11-20T11:20:40,226 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=107 updating hbase:meta row=0470447603cf0ef7bd1ff47e79d9530d, regionState=CLOSING, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:40,227 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T11:20:40,227 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE; CloseRegionProcedure 0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666}] 2024-11-20T11:20:40,378 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:40,379 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=108}] handler.UnassignRegionHandler(124): Close 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:40,379 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=108}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T11:20:40,379 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=108}] regionserver.HRegion(1681): Closing 0470447603cf0ef7bd1ff47e79d9530d, disabling compactions & flushes 2024-11-20T11:20:40,379 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=108}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:40,379 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=108}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:40,379 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=108}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. after waiting 0 ms 2024-11-20T11:20:40,379 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=108}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
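The modify request above changes only family 'A': it adds IS_MOB => 'true' and MOB_THRESHOLD => '4', and the master then drives ModifyTableProcedure (pid=105) plus a ReopenTableRegionsProcedure so the region picks up the new descriptor. A hedged sketch of the equivalent client call, reusing an Admin handle as in the earlier sketches:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Re-declares family 'A' as a MOB family and pushes the new descriptor back to the master.
    public final class EnableMobSketch {
      static void makeFamilyAMob(Admin admin) throws IOException {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        TableDescriptor current = admin.getDescriptor(tn);
        ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder
            .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
            .setMobEnabled(true)      // IS_MOB => 'true'
            .setMobThreshold(4L)      // MOB_THRESHOLD => '4': larger cells go to MOB files
            .build();
        TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
            .modifyColumnFamily(mobA)
            .build();
        admin.modifyTable(updated);   // master runs ModifyTableProcedure (pid=105 above)
      }
    }
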
2024-11-20T11:20:40,382 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=108}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-20T11:20:40,383 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=108}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:40,383 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=108}] regionserver.HRegion(1635): Region close journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:40,383 WARN [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=108}] regionserver.HRegionServer(3786): Not adding moved region record: 0470447603cf0ef7bd1ff47e79d9530d to self. 2024-11-20T11:20:40,384 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=108}] handler.UnassignRegionHandler(170): Closed 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:40,385 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=107 updating hbase:meta row=0470447603cf0ef7bd1ff47e79d9530d, regionState=CLOSED 2024-11-20T11:20:40,386 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=108, resume processing ppid=107 2024-11-20T11:20:40,386 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, ppid=107, state=SUCCESS; CloseRegionProcedure 0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 in 158 msec 2024-11-20T11:20:40,387 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=107, ppid=106, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=0470447603cf0ef7bd1ff47e79d9530d, REOPEN/MOVE; state=CLOSED, location=ee8338ed7cc0,35185,1732101546666; forceNewPlan=false, retain=true 2024-11-20T11:20:40,537 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=107 updating hbase:meta row=0470447603cf0ef7bd1ff47e79d9530d, regionState=OPENING, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:40,538 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=107, state=RUNNABLE; OpenRegionProcedure 0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666}] 2024-11-20T11:20:40,690 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:40,692 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
2024-11-20T11:20:40,692 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(7285): Opening region: {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} 2024-11-20T11:20:40,693 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:40,693 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T11:20:40,693 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(7327): checking encryption for 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:40,693 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(7330): checking classloading for 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:40,694 INFO [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:40,695 INFO [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T11:20:40,695 INFO [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0470447603cf0ef7bd1ff47e79d9530d columnFamilyName A 2024-11-20T11:20:40,696 DEBUG [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:20:40,697 INFO [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] regionserver.HStore(327): Store=0470447603cf0ef7bd1ff47e79d9530d/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:20:40,697 INFO [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:40,697 INFO [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T11:20:40,698 INFO [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0470447603cf0ef7bd1ff47e79d9530d columnFamilyName B 2024-11-20T11:20:40,698 DEBUG [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:20:40,698 INFO [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] regionserver.HStore(327): Store=0470447603cf0ef7bd1ff47e79d9530d/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:20:40,698 INFO [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:40,698 INFO [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T11:20:40,699 INFO [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0470447603cf0ef7bd1ff47e79d9530d columnFamilyName C 2024-11-20T11:20:40,699 DEBUG [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:20:40,699 INFO [StoreOpener-0470447603cf0ef7bd1ff47e79d9530d-1 {}] regionserver.HStore(327): Store=0470447603cf0ef7bd1ff47e79d9530d/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:20:40,699 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:40,700 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:40,700 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:40,702 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T11:20:40,703 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1085): writing seq id for 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:40,703 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1102): Opened 0470447603cf0ef7bd1ff47e79d9530d; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74012188, jitterRate=0.10286754369735718}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T11:20:40,704 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1001): Region open journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:40,704 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d., pid=109, masterSystemTime=1732101640690 2024-11-20T11:20:40,706 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:40,706 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
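After the region reopens, the entries that follow show the test opening a number of separate ZooKeeper-backed Connections (the ReadOnlyZKClient "Connect 0x..." lines, roughly one per worker) and then asking the master to flush the table (FlushTableProcedure, pid=110). A simplified sketch of that pattern; the worker count and the synchronous shape are assumptions, since the test wires its own threads:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // One Connection per worker (each carries its own ZK session), then a table flush.
    public final class WorkerConnectionsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Connection[] workers = new Connection[10];
        for (int i = 0; i < workers.length; i++) {
          workers[i] = ConnectionFactory.createConnection(conf);  // shows up as a ReadOnlyZKClient connect
        }
        try (Connection adminConn = ConnectionFactory.createConnection(conf);
             Admin admin = adminConn.getAdmin()) {
          admin.flush(TableName.valueOf("TestAcidGuarantees"));   // FlushTableProcedure (pid=110 in the entries below)
        }
        for (Connection c : workers) c.close();
      }
    }
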
2024-11-20T11:20:40,706 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=107 updating hbase:meta row=0470447603cf0ef7bd1ff47e79d9530d, regionState=OPEN, openSeqNum=5, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:40,708 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=107 2024-11-20T11:20:40,708 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=107, state=SUCCESS; OpenRegionProcedure 0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 in 169 msec 2024-11-20T11:20:40,709 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-11-20T11:20:40,709 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=0470447603cf0ef7bd1ff47e79d9530d, REOPEN/MOVE in 483 msec 2024-11-20T11:20:40,710 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=106, resume processing ppid=105 2024-11-20T11:20:40,710 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, ppid=105, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 486 msec 2024-11-20T11:20:40,712 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 900 msec 2024-11-20T11:20:40,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=105 2024-11-20T11:20:40,714 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x51f7d511 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@75b14fbd 2024-11-20T11:20:40,719 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b6cf8cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:20:40,720 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1dc42ea6 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@62f74604 2024-11-20T11:20:40,723 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ec15031, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:20:40,723 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x117e86d9 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@49e13594 2024-11-20T11:20:40,726 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3dd5b441, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:20:40,726 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 
0x6cd96549 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c54a0d3 2024-11-20T11:20:40,729 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c336ea4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:20:40,730 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x31aea41b to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3875c8c5 2024-11-20T11:20:40,732 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f94d721, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:20:40,733 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0801ba40 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@319559be 2024-11-20T11:20:40,735 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f49665c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:20:40,736 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x27539bdc to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c907e21 2024-11-20T11:20:40,739 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@683f8469, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:20:40,739 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5e3203d9 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@61ec0f48 2024-11-20T11:20:40,742 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75e4d3d0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:20:40,742 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x798e7fd4 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7819b9e2 2024-11-20T11:20:40,745 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b308f62, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:20:40,745 DEBUG 
[Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7284f16d to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@47679076 2024-11-20T11:20:40,748 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68035c67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:20:40,750 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:20:40,750 DEBUG [hconnection-0x47e9ee11-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:20:40,751 DEBUG [hconnection-0x319d3245-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:20:40,751 DEBUG [hconnection-0x113fee6a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:20:40,752 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50026, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:20:40,752 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50054, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:20:40,754 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50040, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:20:40,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-11-20T11:20:40,754 DEBUG [hconnection-0x5322bd5f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:20:40,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T11:20:40,755 DEBUG [hconnection-0xf3db965-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:20:40,755 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50064, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:20:40,755 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:20:40,756 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50078, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:20:40,756 DEBUG [hconnection-0x490ed228-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:20:40,756 INFO 
[PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:20:40,756 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:20:40,756 DEBUG [hconnection-0x77a8dc64-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:20:40,757 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50084, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:20:40,757 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50100, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:20:40,758 DEBUG [hconnection-0x4b37d6b4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:20:40,758 DEBUG [hconnection-0x62c580ee-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:20:40,759 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50120, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:20:40,759 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50116, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:20:40,759 DEBUG [hconnection-0x4a61003c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:20:40,762 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50128, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:20:40,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:40,763 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0470447603cf0ef7bd1ff47e79d9530d 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T11:20:40,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=A 2024-11-20T11:20:40,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:40,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=B 2024-11-20T11:20:40,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:40,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=C 2024-11-20T11:20:40,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:40,783 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due 
to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:40,783 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:40,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101700777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:40,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101700777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:40,783 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:40,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101700781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:40,784 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:40,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101700783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:40,784 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:40,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101700783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:40,791 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120bd389848acf54a8f894785b2d6d28aab_0470447603cf0ef7bd1ff47e79d9530d is 50, key is test_row_0/A:col10/1732101640763/Put/seqid=0 2024-11-20T11:20:40,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742127_1303 (size=12154) 2024-11-20T11:20:40,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T11:20:40,886 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:40,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101700884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:40,887 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:40,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101700885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:40,887 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:40,887 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:40,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101700885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:40,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101700885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:40,891 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:40,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101700888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:40,909 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:40,909 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T11:20:40,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:40,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:40,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:40,910 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:40,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:40,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:41,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T11:20:41,062 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:41,062 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T11:20:41,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:41,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:41,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:41,063 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:41,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:41,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:41,090 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:41,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101701088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:41,090 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:41,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101701088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:41,091 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:41,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101701089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:41,091 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:41,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101701089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:41,094 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:41,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101701093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:41,199 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:20:41,203 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120bd389848acf54a8f894785b2d6d28aab_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bd389848acf54a8f894785b2d6d28aab_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:41,204 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/99bffaacdffa466caca9f8d4ed5db518, store: [table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:41,205 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/99bffaacdffa466caca9f8d4ed5db518 is 175, key is test_row_0/A:col10/1732101640763/Put/seqid=0 2024-11-20T11:20:41,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742128_1304 (size=30955) 2024-11-20T11:20:41,214 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:41,215 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T11:20:41,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:41,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:41,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:41,215 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:41,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:20:41,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:41,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T11:20:41,367 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:41,368 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T11:20:41,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
2024-11-20T11:20:41,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:41,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:41,368 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:41,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:20:41,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:41,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:41,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101701391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:41,395 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:41,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101701393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:41,395 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:41,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101701393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:41,396 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:41,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101701393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:41,399 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:41,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101701396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:41,520 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:41,520 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T11:20:41,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:41,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:41,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:41,521 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:41,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:41,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:41,610 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=19, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/99bffaacdffa466caca9f8d4ed5db518 2024-11-20T11:20:41,636 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/b0ee536bddca47bf8bd5526bdde99c32 is 50, key is test_row_0/B:col10/1732101640763/Put/seqid=0 2024-11-20T11:20:41,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742129_1305 (size=12001) 2024-11-20T11:20:41,641 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=19 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/b0ee536bddca47bf8bd5526bdde99c32 2024-11-20T11:20:41,664 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/6e0289b797034fec89a71d41eb85f0d8 is 50, key is test_row_0/C:col10/1732101640763/Put/seqid=0 2024-11-20T11:20:41,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742130_1306 (size=12001) 2024-11-20T11:20:41,669 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=19 (bloomFilter=true), 
to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/6e0289b797034fec89a71d41eb85f0d8 2024-11-20T11:20:41,673 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/99bffaacdffa466caca9f8d4ed5db518 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/99bffaacdffa466caca9f8d4ed5db518 2024-11-20T11:20:41,673 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:41,673 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T11:20:41,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:41,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:41,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:41,674 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:41,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:41,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:20:41,678 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/99bffaacdffa466caca9f8d4ed5db518, entries=150, sequenceid=19, filesize=30.2 K 2024-11-20T11:20:41,679 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/b0ee536bddca47bf8bd5526bdde99c32 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/b0ee536bddca47bf8bd5526bdde99c32 2024-11-20T11:20:41,683 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/b0ee536bddca47bf8bd5526bdde99c32, entries=150, sequenceid=19, filesize=11.7 K 2024-11-20T11:20:41,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/6e0289b797034fec89a71d41eb85f0d8 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/6e0289b797034fec89a71d41eb85f0d8 2024-11-20T11:20:41,689 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/6e0289b797034fec89a71d41eb85f0d8, entries=150, sequenceid=19, filesize=11.7 K 2024-11-20T11:20:41,690 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 0470447603cf0ef7bd1ff47e79d9530d in 927ms, sequenceid=19, compaction requested=false 2024-11-20T11:20:41,690 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:41,826 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:41,827 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T11:20:41,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
2024-11-20T11:20:41,827 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing 0470447603cf0ef7bd1ff47e79d9530d 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T11:20:41,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=A 2024-11-20T11:20:41,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:41,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=B 2024-11-20T11:20:41,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:41,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=C 2024-11-20T11:20:41,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:41,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120739e05f23f424a84a9aecad87e0f2092_0470447603cf0ef7bd1ff47e79d9530d is 50, key is test_row_0/A:col10/1732101640782/Put/seqid=0 2024-11-20T11:20:41,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742131_1307 (size=12154) 2024-11-20T11:20:41,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T11:20:41,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:41,900 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:41,912 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:41,912 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:41,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101701907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:41,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101701907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:41,915 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:41,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101701911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:41,916 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:41,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101701912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:41,916 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:41,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101701912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:42,015 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:42,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101702013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:42,015 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:42,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101702013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:42,019 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:42,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101702016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:42,019 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:42,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101702017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:42,019 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:42,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101702017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:42,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:42,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101702216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:42,221 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:42,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101702217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:42,224 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:42,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101702220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:42,224 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:42,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101702220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:42,224 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:42,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101702221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:42,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:20:42,244 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120739e05f23f424a84a9aecad87e0f2092_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120739e05f23f424a84a9aecad87e0f2092_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:42,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/4530add9b18a4195a428fe19314b1a0f, store: [table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:42,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/4530add9b18a4195a428fe19314b1a0f is 175, key is test_row_0/A:col10/1732101640782/Put/seqid=0 2024-11-20T11:20:42,249 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742132_1308 (size=30955) 2024-11-20T11:20:42,473 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T11:20:42,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:42,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101702519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:42,526 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:42,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101702524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:42,527 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:42,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101702525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:42,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:42,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101702527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:42,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:42,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101702527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:42,650 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/4530add9b18a4195a428fe19314b1a0f 2024-11-20T11:20:42,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/ce57e938a10240ee8e5d34d94b2b4eb2 is 50, key is test_row_0/B:col10/1732101640782/Put/seqid=0 2024-11-20T11:20:42,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742133_1309 (size=12001) 2024-11-20T11:20:42,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T11:20:43,025 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:43,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101703023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:43,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:43,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101703029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:43,033 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:43,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101703031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:43,034 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:43,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101703031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:43,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:43,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101703036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:43,063 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/ce57e938a10240ee8e5d34d94b2b4eb2 2024-11-20T11:20:43,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/66546e2dede14596aafffde066642a7c is 50, key is test_row_0/C:col10/1732101640782/Put/seqid=0 2024-11-20T11:20:43,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742134_1310 (size=12001) 2024-11-20T11:20:43,475 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/66546e2dede14596aafffde066642a7c 2024-11-20T11:20:43,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/4530add9b18a4195a428fe19314b1a0f as 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/4530add9b18a4195a428fe19314b1a0f 2024-11-20T11:20:43,484 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/4530add9b18a4195a428fe19314b1a0f, entries=150, sequenceid=41, filesize=30.2 K 2024-11-20T11:20:43,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/ce57e938a10240ee8e5d34d94b2b4eb2 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/ce57e938a10240ee8e5d34d94b2b4eb2 2024-11-20T11:20:43,487 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/ce57e938a10240ee8e5d34d94b2b4eb2, entries=150, sequenceid=41, filesize=11.7 K 2024-11-20T11:20:43,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/66546e2dede14596aafffde066642a7c as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/66546e2dede14596aafffde066642a7c 2024-11-20T11:20:43,491 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/66546e2dede14596aafffde066642a7c, entries=150, sequenceid=41, filesize=11.7 K 2024-11-20T11:20:43,493 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 0470447603cf0ef7bd1ff47e79d9530d in 1665ms, sequenceid=41, compaction requested=false 2024-11-20T11:20:43,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:43,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
2024-11-20T11:20:43,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-11-20T11:20:43,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-11-20T11:20:43,495 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-11-20T11:20:43,495 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7380 sec 2024-11-20T11:20:43,497 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 2.7460 sec 2024-11-20T11:20:44,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:44,034 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0470447603cf0ef7bd1ff47e79d9530d 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T11:20:44,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=A 2024-11-20T11:20:44,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:44,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=B 2024-11-20T11:20:44,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:44,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=C 2024-11-20T11:20:44,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:44,041 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120126ff3bb9ce341d7821b57cd8506477c_0470447603cf0ef7bd1ff47e79d9530d is 50, key is test_row_0/A:col10/1732101644032/Put/seqid=0 2024-11-20T11:20:44,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742135_1311 (size=14594) 2024-11-20T11:20:44,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:44,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101704087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:44,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:44,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101704087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:44,093 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:44,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:44,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101704088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:44,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101704089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:44,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:44,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101704089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:44,195 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:44,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101704194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:44,195 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:44,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101704194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:44,195 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:44,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101704194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:44,198 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:44,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101704195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:44,199 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:44,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101704195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:44,398 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:44,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101704396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:44,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:44,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101704396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:44,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:44,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101704397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:44,403 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:44,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101704399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:44,404 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:44,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101704400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:44,454 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:20:44,458 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120126ff3bb9ce341d7821b57cd8506477c_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120126ff3bb9ce341d7821b57cd8506477c_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:44,459 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/5854cf6583564cb3aa47df6ea52f7448, store: [table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:44,460 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/5854cf6583564cb3aa47df6ea52f7448 is 175, key is test_row_0/A:col10/1732101644032/Put/seqid=0 2024-11-20T11:20:44,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742136_1312 (size=39549) 2024-11-20T11:20:44,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:44,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101704700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:44,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:44,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101704701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:44,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:44,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101704701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:44,708 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:44,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101704705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:44,710 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:44,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101704707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:44,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T11:20:44,860 INFO [Thread-1395 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-11-20T11:20:44,861 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:20:44,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-11-20T11:20:44,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T11:20:44,863 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:20:44,863 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:20:44,863 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:20:44,865 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=57, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/5854cf6583564cb3aa47df6ea52f7448 2024-11-20T11:20:44,872 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/adec9dfab7a6492daf76ce072d8eca13 is 50, key is test_row_0/B:col10/1732101644032/Put/seqid=0 2024-11-20T11:20:44,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742137_1313 (size=12001) 
2024-11-20T11:20:44,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T11:20:45,014 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:45,014 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T11:20:45,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:45,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:45,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:45,015 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:45,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:45,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:45,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T11:20:45,167 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:45,168 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T11:20:45,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
2024-11-20T11:20:45,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:45,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:45,168 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:45,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:20:45,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:45,208 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:45,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101705205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:45,208 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:45,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101705206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:45,212 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:45,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101705208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:45,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:45,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101705210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:45,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:45,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101705214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:45,277 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=57 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/adec9dfab7a6492daf76ce072d8eca13 2024-11-20T11:20:45,285 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/16e3a910bccb4a3e87dfb8ef8642cdf1 is 50, key is test_row_0/C:col10/1732101644032/Put/seqid=0 2024-11-20T11:20:45,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742138_1314 (size=12001) 2024-11-20T11:20:45,320 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:45,321 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T11:20:45,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:45,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
as already flushing 2024-11-20T11:20:45,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:45,321 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:45,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:45,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:45,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T11:20:45,474 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:45,474 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T11:20:45,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:45,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:45,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:45,474 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:45,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:45,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:45,626 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:45,627 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T11:20:45,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:45,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:45,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:45,628 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:45,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:45,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:45,703 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=57 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/16e3a910bccb4a3e87dfb8ef8642cdf1 2024-11-20T11:20:45,707 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/5854cf6583564cb3aa47df6ea52f7448 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/5854cf6583564cb3aa47df6ea52f7448 2024-11-20T11:20:45,711 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/5854cf6583564cb3aa47df6ea52f7448, entries=200, sequenceid=57, filesize=38.6 K 2024-11-20T11:20:45,712 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/adec9dfab7a6492daf76ce072d8eca13 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/adec9dfab7a6492daf76ce072d8eca13 2024-11-20T11:20:45,717 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/adec9dfab7a6492daf76ce072d8eca13, entries=150, sequenceid=57, 
filesize=11.7 K 2024-11-20T11:20:45,718 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/16e3a910bccb4a3e87dfb8ef8642cdf1 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/16e3a910bccb4a3e87dfb8ef8642cdf1 2024-11-20T11:20:45,723 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/16e3a910bccb4a3e87dfb8ef8642cdf1, entries=150, sequenceid=57, filesize=11.7 K 2024-11-20T11:20:45,724 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 0470447603cf0ef7bd1ff47e79d9530d in 1691ms, sequenceid=57, compaction requested=true 2024-11-20T11:20:45,724 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:45,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0470447603cf0ef7bd1ff47e79d9530d:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:20:45,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:45,724 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:45,724 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:45,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0470447603cf0ef7bd1ff47e79d9530d:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:20:45,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:45,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0470447603cf0ef7bd1ff47e79d9530d:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:20:45,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:20:45,725 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:45,725 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:45,726 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 
0470447603cf0ef7bd1ff47e79d9530d/B is initiating minor compaction (all files) 2024-11-20T11:20:45,726 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 0470447603cf0ef7bd1ff47e79d9530d/A is initiating minor compaction (all files) 2024-11-20T11:20:45,726 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0470447603cf0ef7bd1ff47e79d9530d/B in TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:45,726 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0470447603cf0ef7bd1ff47e79d9530d/A in TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:45,726 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/b0ee536bddca47bf8bd5526bdde99c32, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/ce57e938a10240ee8e5d34d94b2b4eb2, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/adec9dfab7a6492daf76ce072d8eca13] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp, totalSize=35.2 K 2024-11-20T11:20:45,726 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/99bffaacdffa466caca9f8d4ed5db518, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/4530add9b18a4195a428fe19314b1a0f, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/5854cf6583564cb3aa47df6ea52f7448] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp, totalSize=99.1 K 2024-11-20T11:20:45,726 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:45,726 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
files: [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/99bffaacdffa466caca9f8d4ed5db518, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/4530add9b18a4195a428fe19314b1a0f, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/5854cf6583564cb3aa47df6ea52f7448] 2024-11-20T11:20:45,727 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting b0ee536bddca47bf8bd5526bdde99c32, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=19, earliestPutTs=1732101640762 2024-11-20T11:20:45,727 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 99bffaacdffa466caca9f8d4ed5db518, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=19, earliestPutTs=1732101640762 2024-11-20T11:20:45,727 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting ce57e938a10240ee8e5d34d94b2b4eb2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732101640775 2024-11-20T11:20:45,727 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4530add9b18a4195a428fe19314b1a0f, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732101640775 2024-11-20T11:20:45,728 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting adec9dfab7a6492daf76ce072d8eca13, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=57, earliestPutTs=1732101641911 2024-11-20T11:20:45,728 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5854cf6583564cb3aa47df6ea52f7448, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=57, earliestPutTs=1732101641910 2024-11-20T11:20:45,742 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:45,742 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0470447603cf0ef7bd1ff47e79d9530d#B#compaction#267 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:45,743 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/7c37fc5ad50a441b944b0c163c75744f is 50, key is test_row_0/B:col10/1732101644032/Put/seqid=0 2024-11-20T11:20:45,752 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112075970d84ee2d4b37ae204bfa4ad73589_0470447603cf0ef7bd1ff47e79d9530d store=[table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:45,755 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112075970d84ee2d4b37ae204bfa4ad73589_0470447603cf0ef7bd1ff47e79d9530d, store=[table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:45,755 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112075970d84ee2d4b37ae204bfa4ad73589_0470447603cf0ef7bd1ff47e79d9530d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:45,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742140_1316 (size=4469) 2024-11-20T11:20:45,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742139_1315 (size=12104) 2024-11-20T11:20:45,780 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:45,780 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T11:20:45,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
2024-11-20T11:20:45,781 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing 0470447603cf0ef7bd1ff47e79d9530d 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T11:20:45,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=A 2024-11-20T11:20:45,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:45,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=B 2024-11-20T11:20:45,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:45,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=C 2024-11-20T11:20:45,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:45,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112014d0ec403eef42c5a3ede8439469ed8d_0470447603cf0ef7bd1ff47e79d9530d is 50, key is test_row_0/A:col10/1732101644088/Put/seqid=0 2024-11-20T11:20:45,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742141_1317 (size=12154) 2024-11-20T11:20:45,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T11:20:46,161 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0470447603cf0ef7bd1ff47e79d9530d#A#compaction#268 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:46,162 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/7e7e503272e746fea4f7f5c6d6ca48c1 is 175, key is test_row_0/A:col10/1732101644032/Put/seqid=0 2024-11-20T11:20:46,166 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/7c37fc5ad50a441b944b0c163c75744f as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/7c37fc5ad50a441b944b0c163c75744f 2024-11-20T11:20:46,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742142_1318 (size=31058) 2024-11-20T11:20:46,171 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0470447603cf0ef7bd1ff47e79d9530d/B of 0470447603cf0ef7bd1ff47e79d9530d into 7c37fc5ad50a441b944b0c163c75744f(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:20:46,171 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:46,171 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d., storeName=0470447603cf0ef7bd1ff47e79d9530d/B, priority=13, startTime=1732101645724; duration=0sec 2024-11-20T11:20:46,171 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:20:46,171 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0470447603cf0ef7bd1ff47e79d9530d:B 2024-11-20T11:20:46,171 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:46,173 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:46,173 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 0470447603cf0ef7bd1ff47e79d9530d/C is initiating minor compaction (all files) 2024-11-20T11:20:46,173 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0470447603cf0ef7bd1ff47e79d9530d/C in TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
2024-11-20T11:20:46,173 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/6e0289b797034fec89a71d41eb85f0d8, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/66546e2dede14596aafffde066642a7c, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/16e3a910bccb4a3e87dfb8ef8642cdf1] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp, totalSize=35.2 K 2024-11-20T11:20:46,173 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e0289b797034fec89a71d41eb85f0d8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=19, earliestPutTs=1732101640762 2024-11-20T11:20:46,174 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 66546e2dede14596aafffde066642a7c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732101640775 2024-11-20T11:20:46,174 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 16e3a910bccb4a3e87dfb8ef8642cdf1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=57, earliestPutTs=1732101641911 2024-11-20T11:20:46,179 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0470447603cf0ef7bd1ff47e79d9530d#C#compaction#270 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:46,180 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/512f1e951ff8489a84af94a05b6af1f4 is 50, key is test_row_0/C:col10/1732101644032/Put/seqid=0 2024-11-20T11:20:46,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742143_1319 (size=12104) 2024-11-20T11:20:46,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:20:46,201 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112014d0ec403eef42c5a3ede8439469ed8d_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112014d0ec403eef42c5a3ede8439469ed8d_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:46,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/fafeeefbc0644efb92fc2a2059762c58, store: [table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:46,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/fafeeefbc0644efb92fc2a2059762c58 is 175, key is test_row_0/A:col10/1732101644088/Put/seqid=0 2024-11-20T11:20:46,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742144_1320 (size=30955) 2024-11-20T11:20:46,207 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/fafeeefbc0644efb92fc2a2059762c58 2024-11-20T11:20:46,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:46,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
as already flushing 2024-11-20T11:20:46,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/df9adf14c91941b0b8dfaf75e9aab155 is 50, key is test_row_0/B:col10/1732101644088/Put/seqid=0 2024-11-20T11:20:46,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742145_1321 (size=12001) 2024-11-20T11:20:46,222 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/df9adf14c91941b0b8dfaf75e9aab155 2024-11-20T11:20:46,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/b13ea554e6034b2db220d9388f91260e is 50, key is test_row_0/C:col10/1732101644088/Put/seqid=0 2024-11-20T11:20:46,235 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:46,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101706228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:46,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:46,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101706231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:46,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:46,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101706231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:46,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:46,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101706235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:46,243 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:46,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101706236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:46,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742146_1322 (size=12001) 2024-11-20T11:20:46,248 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/b13ea554e6034b2db220d9388f91260e 2024-11-20T11:20:46,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/fafeeefbc0644efb92fc2a2059762c58 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/fafeeefbc0644efb92fc2a2059762c58 2024-11-20T11:20:46,256 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/fafeeefbc0644efb92fc2a2059762c58, entries=150, sequenceid=78, filesize=30.2 K 2024-11-20T11:20:46,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/df9adf14c91941b0b8dfaf75e9aab155 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/df9adf14c91941b0b8dfaf75e9aab155 2024-11-20T11:20:46,261 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 
{event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/df9adf14c91941b0b8dfaf75e9aab155, entries=150, sequenceid=78, filesize=11.7 K 2024-11-20T11:20:46,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/b13ea554e6034b2db220d9388f91260e as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/b13ea554e6034b2db220d9388f91260e 2024-11-20T11:20:46,266 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/b13ea554e6034b2db220d9388f91260e, entries=150, sequenceid=78, filesize=11.7 K 2024-11-20T11:20:46,266 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 0470447603cf0ef7bd1ff47e79d9530d in 485ms, sequenceid=78, compaction requested=false 2024-11-20T11:20:46,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:46,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
2024-11-20T11:20:46,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-11-20T11:20:46,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-11-20T11:20:46,269 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-11-20T11:20:46,269 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4050 sec 2024-11-20T11:20:46,270 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 1.4080 sec 2024-11-20T11:20:46,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:46,340 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0470447603cf0ef7bd1ff47e79d9530d 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-20T11:20:46,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=A 2024-11-20T11:20:46,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:46,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=B 2024-11-20T11:20:46,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:46,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=C 2024-11-20T11:20:46,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:46,349 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112030e65e6d4bb9491b959859cbf9b98c07_0470447603cf0ef7bd1ff47e79d9530d is 50, key is test_row_0/A:col10/1732101646224/Put/seqid=0 2024-11-20T11:20:46,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742147_1323 (size=17034) 2024-11-20T11:20:46,372 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:46,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101706366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:46,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:46,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101706366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:46,372 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:46,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101706366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:46,373 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:46,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101706367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:46,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:46,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101706367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:46,477 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:46,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101706473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:46,478 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:46,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101706473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:46,478 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:46,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101706474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:46,478 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:46,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101706474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:46,478 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:46,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101706474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:46,574 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/7e7e503272e746fea4f7f5c6d6ca48c1 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/7e7e503272e746fea4f7f5c6d6ca48c1 2024-11-20T11:20:46,579 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0470447603cf0ef7bd1ff47e79d9530d/A of 0470447603cf0ef7bd1ff47e79d9530d into 7e7e503272e746fea4f7f5c6d6ca48c1(size=30.3 K), total size for store is 60.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:20:46,579 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:46,579 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d., storeName=0470447603cf0ef7bd1ff47e79d9530d/A, priority=13, startTime=1732101645724; duration=0sec 2024-11-20T11:20:46,579 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:46,579 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0470447603cf0ef7bd1ff47e79d9530d:A 2024-11-20T11:20:46,588 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/512f1e951ff8489a84af94a05b6af1f4 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/512f1e951ff8489a84af94a05b6af1f4 2024-11-20T11:20:46,592 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0470447603cf0ef7bd1ff47e79d9530d/C of 0470447603cf0ef7bd1ff47e79d9530d into 512f1e951ff8489a84af94a05b6af1f4(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:20:46,592 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:46,592 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d., storeName=0470447603cf0ef7bd1ff47e79d9530d/C, priority=13, startTime=1732101645725; duration=0sec 2024-11-20T11:20:46,592 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:46,592 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0470447603cf0ef7bd1ff47e79d9530d:C 2024-11-20T11:20:46,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:46,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101706679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:46,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:46,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101706679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:46,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:46,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101706679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:46,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:46,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101706680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:46,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:46,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101706680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:46,758 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:20:46,762 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112030e65e6d4bb9491b959859cbf9b98c07_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112030e65e6d4bb9491b959859cbf9b98c07_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:46,763 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/8028b984dec7451a9ec0471bdf8894aa, store: [table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:46,763 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/8028b984dec7451a9ec0471bdf8894aa is 175, key is test_row_0/A:col10/1732101646224/Put/seqid=0 2024-11-20T11:20:46,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742148_1324 (size=48139) 2024-11-20T11:20:46,776 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=97, memsize=33.5 
K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/8028b984dec7451a9ec0471bdf8894aa 2024-11-20T11:20:46,783 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/6c7b380115194f4396b89f539ca7cba2 is 50, key is test_row_0/B:col10/1732101646224/Put/seqid=0 2024-11-20T11:20:46,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742149_1325 (size=12001) 2024-11-20T11:20:46,790 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/6c7b380115194f4396b89f539ca7cba2 2024-11-20T11:20:46,801 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/51ebd98610f7482a8d7e051533a35a19 is 50, key is test_row_0/C:col10/1732101646224/Put/seqid=0 2024-11-20T11:20:46,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742150_1326 (size=12001) 2024-11-20T11:20:46,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T11:20:46,970 INFO [Thread-1395 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-11-20T11:20:46,971 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:20:46,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-11-20T11:20:46,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T11:20:46,973 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:20:46,973 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:20:46,973 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:20:46,987 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:46,987 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:46,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101706984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:46,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101706983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:46,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:46,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101706984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:46,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:46,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101706984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:46,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:46,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101706986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:47,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T11:20:47,125 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:47,125 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T11:20:47,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:47,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:47,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:47,125 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:20:47,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:47,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:47,209 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/51ebd98610f7482a8d7e051533a35a19 2024-11-20T11:20:47,214 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/8028b984dec7451a9ec0471bdf8894aa as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/8028b984dec7451a9ec0471bdf8894aa 2024-11-20T11:20:47,218 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/8028b984dec7451a9ec0471bdf8894aa, entries=250, sequenceid=97, filesize=47.0 K 2024-11-20T11:20:47,219 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/6c7b380115194f4396b89f539ca7cba2 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/6c7b380115194f4396b89f539ca7cba2 2024-11-20T11:20:47,222 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/6c7b380115194f4396b89f539ca7cba2, entries=150, sequenceid=97, filesize=11.7 K 2024-11-20T11:20:47,223 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/51ebd98610f7482a8d7e051533a35a19 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/51ebd98610f7482a8d7e051533a35a19 2024-11-20T11:20:47,226 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/51ebd98610f7482a8d7e051533a35a19, entries=150, sequenceid=97, filesize=11.7 K 2024-11-20T11:20:47,227 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for 0470447603cf0ef7bd1ff47e79d9530d in 887ms, sequenceid=97, compaction requested=true 2024-11-20T11:20:47,227 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:47,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
0470447603cf0ef7bd1ff47e79d9530d:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:20:47,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:47,227 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:47,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0470447603cf0ef7bd1ff47e79d9530d:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:20:47,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:47,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0470447603cf0ef7bd1ff47e79d9530d:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:20:47,228 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:47,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:20:47,229 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110152 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:47,229 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:47,229 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 0470447603cf0ef7bd1ff47e79d9530d/A is initiating minor compaction (all files) 2024-11-20T11:20:47,229 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 0470447603cf0ef7bd1ff47e79d9530d/B is initiating minor compaction (all files) 2024-11-20T11:20:47,229 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0470447603cf0ef7bd1ff47e79d9530d/A in TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:47,229 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0470447603cf0ef7bd1ff47e79d9530d/B in TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
2024-11-20T11:20:47,229 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/7e7e503272e746fea4f7f5c6d6ca48c1, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/fafeeefbc0644efb92fc2a2059762c58, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/8028b984dec7451a9ec0471bdf8894aa] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp, totalSize=107.6 K 2024-11-20T11:20:47,229 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/7c37fc5ad50a441b944b0c163c75744f, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/df9adf14c91941b0b8dfaf75e9aab155, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/6c7b380115194f4396b89f539ca7cba2] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp, totalSize=35.3 K 2024-11-20T11:20:47,229 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:47,229 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
files: [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/7e7e503272e746fea4f7f5c6d6ca48c1, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/fafeeefbc0644efb92fc2a2059762c58, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/8028b984dec7451a9ec0471bdf8894aa] 2024-11-20T11:20:47,229 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c37fc5ad50a441b944b0c163c75744f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=57, earliestPutTs=1732101641911 2024-11-20T11:20:47,229 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e7e503272e746fea4f7f5c6d6ca48c1, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=57, earliestPutTs=1732101641911 2024-11-20T11:20:47,230 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting df9adf14c91941b0b8dfaf75e9aab155, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732101644087 2024-11-20T11:20:47,230 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting fafeeefbc0644efb92fc2a2059762c58, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732101644087 2024-11-20T11:20:47,230 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c7b380115194f4396b89f539ca7cba2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732101646224 2024-11-20T11:20:47,231 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8028b984dec7451a9ec0471bdf8894aa, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732101646224 2024-11-20T11:20:47,238 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0470447603cf0ef7bd1ff47e79d9530d#B#compaction#276 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:47,239 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:47,239 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/e3351963cf564d3e8fb33f2a83e6c13a is 50, key is test_row_0/B:col10/1732101646224/Put/seqid=0 2024-11-20T11:20:47,241 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120c101cbb489bb4fc7823c8757ef41be71_0470447603cf0ef7bd1ff47e79d9530d store=[table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:47,243 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120c101cbb489bb4fc7823c8757ef41be71_0470447603cf0ef7bd1ff47e79d9530d, store=[table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:47,244 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c101cbb489bb4fc7823c8757ef41be71_0470447603cf0ef7bd1ff47e79d9530d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:47,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742151_1327 (size=12207) 2024-11-20T11:20:47,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742152_1328 (size=4469) 2024-11-20T11:20:47,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T11:20:47,277 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:47,278 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T11:20:47,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
2024-11-20T11:20:47,278 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing 0470447603cf0ef7bd1ff47e79d9530d 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-20T11:20:47,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=A 2024-11-20T11:20:47,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:47,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=B 2024-11-20T11:20:47,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:47,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=C 2024-11-20T11:20:47,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:47,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a2a67e39e2214fb5b6f6a016ce994ec4_0470447603cf0ef7bd1ff47e79d9530d is 50, key is test_row_0/A:col10/1732101646366/Put/seqid=0 2024-11-20T11:20:47,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742153_1329 (size=12154) 2024-11-20T11:20:47,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:47,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:47,510 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:47,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101707504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:47,511 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:47,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101707506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:47,511 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:47,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101707506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:47,515 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:47,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101707510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:47,516 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:47,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101707510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:47,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T11:20:47,614 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:47,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101707611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:47,614 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:47,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101707612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:47,614 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:47,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101707612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:47,619 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:47,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101707616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:47,620 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:47,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101707617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:47,654 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/e3351963cf564d3e8fb33f2a83e6c13a as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/e3351963cf564d3e8fb33f2a83e6c13a 2024-11-20T11:20:47,658 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0470447603cf0ef7bd1ff47e79d9530d#A#compaction#277 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:47,659 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0470447603cf0ef7bd1ff47e79d9530d/B of 0470447603cf0ef7bd1ff47e79d9530d into e3351963cf564d3e8fb33f2a83e6c13a(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:20:47,659 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:47,659 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d., storeName=0470447603cf0ef7bd1ff47e79d9530d/B, priority=13, startTime=1732101647227; duration=0sec 2024-11-20T11:20:47,659 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/ba2fb839fdf348b5a1a71eff143d94ff is 175, key is test_row_0/A:col10/1732101646224/Put/seqid=0 2024-11-20T11:20:47,659 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:20:47,659 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0470447603cf0ef7bd1ff47e79d9530d:B 2024-11-20T11:20:47,659 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:47,660 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:47,660 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 0470447603cf0ef7bd1ff47e79d9530d/C is initiating minor compaction (all files) 2024-11-20T11:20:47,660 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0470447603cf0ef7bd1ff47e79d9530d/C in TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
2024-11-20T11:20:47,660 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/512f1e951ff8489a84af94a05b6af1f4, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/b13ea554e6034b2db220d9388f91260e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/51ebd98610f7482a8d7e051533a35a19] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp, totalSize=35.3 K 2024-11-20T11:20:47,661 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 512f1e951ff8489a84af94a05b6af1f4, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=57, earliestPutTs=1732101641911 2024-11-20T11:20:47,661 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting b13ea554e6034b2db220d9388f91260e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732101644087 2024-11-20T11:20:47,661 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 51ebd98610f7482a8d7e051533a35a19, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732101646224 2024-11-20T11:20:47,667 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0470447603cf0ef7bd1ff47e79d9530d#C#compaction#279 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:47,667 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/ec79aa93c7ab471fba8ddf8aa9b7f044 is 50, key is test_row_0/C:col10/1732101646224/Put/seqid=0 2024-11-20T11:20:47,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742154_1330 (size=31161) 2024-11-20T11:20:47,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742155_1331 (size=12207) 2024-11-20T11:20:47,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:20:47,693 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a2a67e39e2214fb5b6f6a016ce994ec4_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a2a67e39e2214fb5b6f6a016ce994ec4_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:47,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/28b89fab405f4c62ad840197336c0389, store: [table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:47,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/28b89fab405f4c62ad840197336c0389 is 175, key is test_row_0/A:col10/1732101646366/Put/seqid=0 2024-11-20T11:20:47,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742156_1332 (size=30955) 2024-11-20T11:20:47,698 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/28b89fab405f4c62ad840197336c0389 2024-11-20T11:20:47,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/008ceabe7df54ff1b038d439f2e79e7e is 50, key is test_row_0/B:col10/1732101646366/Put/seqid=0 2024-11-20T11:20:47,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742157_1333 (size=12001) 2024-11-20T11:20:47,817 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:47,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101707815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:47,818 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:47,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101707815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:47,818 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:47,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101707815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:47,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:47,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101707821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:47,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:47,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101707821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:48,074 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/ba2fb839fdf348b5a1a71eff143d94ff as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/ba2fb839fdf348b5a1a71eff143d94ff 2024-11-20T11:20:48,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T11:20:48,079 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0470447603cf0ef7bd1ff47e79d9530d/A of 0470447603cf0ef7bd1ff47e79d9530d into ba2fb839fdf348b5a1a71eff143d94ff(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:20:48,079 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:48,079 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d., storeName=0470447603cf0ef7bd1ff47e79d9530d/A, priority=13, startTime=1732101647227; duration=0sec 2024-11-20T11:20:48,079 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:48,079 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0470447603cf0ef7bd1ff47e79d9530d:A 2024-11-20T11:20:48,081 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/ec79aa93c7ab471fba8ddf8aa9b7f044 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/ec79aa93c7ab471fba8ddf8aa9b7f044 2024-11-20T11:20:48,085 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0470447603cf0ef7bd1ff47e79d9530d/C of 0470447603cf0ef7bd1ff47e79d9530d into ec79aa93c7ab471fba8ddf8aa9b7f044(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:20:48,085 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:48,085 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d., storeName=0470447603cf0ef7bd1ff47e79d9530d/C, priority=13, startTime=1732101647228; duration=0sec 2024-11-20T11:20:48,086 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:48,086 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0470447603cf0ef7bd1ff47e79d9530d:C 2024-11-20T11:20:48,108 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/008ceabe7df54ff1b038d439f2e79e7e 2024-11-20T11:20:48,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/35b86407f4bb46b883ba1bfa47242ffb is 50, key is test_row_0/C:col10/1732101646366/Put/seqid=0 2024-11-20T11:20:48,119 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742158_1334 (size=12001) 2024-11-20T11:20:48,119 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/35b86407f4bb46b883ba1bfa47242ffb 2024-11-20T11:20:48,121 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:48,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101708118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:48,121 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:48,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101708119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:48,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/28b89fab405f4c62ad840197336c0389 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/28b89fab405f4c62ad840197336c0389 2024-11-20T11:20:48,124 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:48,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101708121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:48,127 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/28b89fab405f4c62ad840197336c0389, entries=150, sequenceid=118, filesize=30.2 K 2024-11-20T11:20:48,128 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:48,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101708125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:48,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/008ceabe7df54ff1b038d439f2e79e7e as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/008ceabe7df54ff1b038d439f2e79e7e 2024-11-20T11:20:48,128 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:48,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101708125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:48,132 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/008ceabe7df54ff1b038d439f2e79e7e, entries=150, sequenceid=118, filesize=11.7 K 2024-11-20T11:20:48,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/35b86407f4bb46b883ba1bfa47242ffb as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/35b86407f4bb46b883ba1bfa47242ffb 2024-11-20T11:20:48,136 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/35b86407f4bb46b883ba1bfa47242ffb, entries=150, sequenceid=118, filesize=11.7 K 2024-11-20T11:20:48,137 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 0470447603cf0ef7bd1ff47e79d9530d in 859ms, sequenceid=118, compaction requested=false 2024-11-20T11:20:48,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:48,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
2024-11-20T11:20:48,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-11-20T11:20:48,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-11-20T11:20:48,139 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-11-20T11:20:48,140 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1650 sec 2024-11-20T11:20:48,141 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 1.1690 sec 2024-11-20T11:20:48,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:48,629 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0470447603cf0ef7bd1ff47e79d9530d 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-20T11:20:48,630 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=A 2024-11-20T11:20:48,630 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:48,630 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=B 2024-11-20T11:20:48,630 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:48,630 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=C 2024-11-20T11:20:48,630 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:48,647 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:48,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101708642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:48,648 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411206df914d0e52446188ac63d6e39acf194_0470447603cf0ef7bd1ff47e79d9530d is 50, key is test_row_0/A:col10/1732101647503/Put/seqid=0 2024-11-20T11:20:48,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:48,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101708643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:48,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:48,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101708643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:48,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:48,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101708644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:48,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742159_1335 (size=14794) 2024-11-20T11:20:48,652 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:48,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101708645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:48,752 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:48,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101708748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:48,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:48,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101708750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:48,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:48,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101708751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:48,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:48,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101708751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:48,757 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:48,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101708753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:48,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:48,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101708953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:48,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:48,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101708956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:48,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:48,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101708956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:48,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:48,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101708957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:48,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:48,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101708958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:49,053 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:20:49,058 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411206df914d0e52446188ac63d6e39acf194_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206df914d0e52446188ac63d6e39acf194_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:49,059 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/62395fb3697e4ad98eaff04175b117a5, store: [table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:49,060 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/62395fb3697e4ad98eaff04175b117a5 is 175, key is test_row_0/A:col10/1732101647503/Put/seqid=0 2024-11-20T11:20:49,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742160_1336 (size=39749) 2024-11-20T11:20:49,075 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=140, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/62395fb3697e4ad98eaff04175b117a5 2024-11-20T11:20:49,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T11:20:49,076 INFO [Thread-1395 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-11-20T11:20:49,079 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:20:49,079 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-11-20T11:20:49,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T11:20:49,080 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:20:49,081 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:20:49,081 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:20:49,085 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/471238cdb572431080c9112c04d7d62a is 50, key is test_row_0/B:col10/1732101647503/Put/seqid=0 2024-11-20T11:20:49,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742161_1337 (size=12151) 2024-11-20T11:20:49,090 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=140 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/471238cdb572431080c9112c04d7d62a 2024-11-20T11:20:49,099 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/362eea827395459ab211d94a67a52452 is 50, key is test_row_0/C:col10/1732101647503/Put/seqid=0 2024-11-20T11:20:49,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742162_1338 (size=12151) 2024-11-20T11:20:49,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T11:20:49,233 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:49,234 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-20T11:20:49,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
2024-11-20T11:20:49,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:49,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:49,234 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:49,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:20:49,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:49,263 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:49,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101709257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:49,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:49,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:49,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101709260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:49,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101709260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:49,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:49,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101709262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:49,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:49,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101709262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:49,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T11:20:49,386 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:49,386 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-20T11:20:49,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:49,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:49,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:49,387 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:49,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:49,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:49,504 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=140 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/362eea827395459ab211d94a67a52452 2024-11-20T11:20:49,508 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/62395fb3697e4ad98eaff04175b117a5 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/62395fb3697e4ad98eaff04175b117a5 2024-11-20T11:20:49,512 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/62395fb3697e4ad98eaff04175b117a5, entries=200, sequenceid=140, filesize=38.8 K 2024-11-20T11:20:49,513 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/471238cdb572431080c9112c04d7d62a as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/471238cdb572431080c9112c04d7d62a 2024-11-20T11:20:49,516 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/471238cdb572431080c9112c04d7d62a, entries=150, 
sequenceid=140, filesize=11.9 K 2024-11-20T11:20:49,517 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/362eea827395459ab211d94a67a52452 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/362eea827395459ab211d94a67a52452 2024-11-20T11:20:49,521 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/362eea827395459ab211d94a67a52452, entries=150, sequenceid=140, filesize=11.9 K 2024-11-20T11:20:49,521 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 0470447603cf0ef7bd1ff47e79d9530d in 892ms, sequenceid=140, compaction requested=true 2024-11-20T11:20:49,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:49,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0470447603cf0ef7bd1ff47e79d9530d:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:20:49,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:49,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0470447603cf0ef7bd1ff47e79d9530d:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:20:49,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:49,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0470447603cf0ef7bd1ff47e79d9530d:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:20:49,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:20:49,522 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:49,522 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:49,523 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:49,523 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:49,523 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] 
regionserver.HStore(1540): 0470447603cf0ef7bd1ff47e79d9530d/B is initiating minor compaction (all files) 2024-11-20T11:20:49,523 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 0470447603cf0ef7bd1ff47e79d9530d/A is initiating minor compaction (all files) 2024-11-20T11:20:49,523 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0470447603cf0ef7bd1ff47e79d9530d/B in TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:49,523 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0470447603cf0ef7bd1ff47e79d9530d/A in TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:49,523 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/e3351963cf564d3e8fb33f2a83e6c13a, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/008ceabe7df54ff1b038d439f2e79e7e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/471238cdb572431080c9112c04d7d62a] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp, totalSize=35.5 K 2024-11-20T11:20:49,524 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/ba2fb839fdf348b5a1a71eff143d94ff, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/28b89fab405f4c62ad840197336c0389, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/62395fb3697e4ad98eaff04175b117a5] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp, totalSize=99.5 K 2024-11-20T11:20:49,524 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:49,524 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
files: [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/ba2fb839fdf348b5a1a71eff143d94ff, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/28b89fab405f4c62ad840197336c0389, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/62395fb3697e4ad98eaff04175b117a5] 2024-11-20T11:20:49,524 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting e3351963cf564d3e8fb33f2a83e6c13a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732101646224 2024-11-20T11:20:49,524 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting ba2fb839fdf348b5a1a71eff143d94ff, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732101646224 2024-11-20T11:20:49,524 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 008ceabe7df54ff1b038d439f2e79e7e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732101646358 2024-11-20T11:20:49,524 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 471238cdb572431080c9112c04d7d62a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732101647503 2024-11-20T11:20:49,524 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 28b89fab405f4c62ad840197336c0389, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732101646358 2024-11-20T11:20:49,525 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 62395fb3697e4ad98eaff04175b117a5, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732101647503 2024-11-20T11:20:49,531 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0470447603cf0ef7bd1ff47e79d9530d#B#compaction#285 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:49,532 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/8d72375dba2844ea891e6bdcf63d28b4 is 50, key is test_row_0/B:col10/1732101647503/Put/seqid=0 2024-11-20T11:20:49,532 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:49,535 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120ef557ee476ab4159b57235d773359bbe_0470447603cf0ef7bd1ff47e79d9530d store=[table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:49,536 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120ef557ee476ab4159b57235d773359bbe_0470447603cf0ef7bd1ff47e79d9530d, store=[table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:49,536 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ef557ee476ab4159b57235d773359bbe_0470447603cf0ef7bd1ff47e79d9530d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:49,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742163_1339 (size=12459) 2024-11-20T11:20:49,539 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:49,539 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-20T11:20:49,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
2024-11-20T11:20:49,540 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing 0470447603cf0ef7bd1ff47e79d9530d 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-20T11:20:49,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=A 2024-11-20T11:20:49,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:49,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=B 2024-11-20T11:20:49,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:49,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=C 2024-11-20T11:20:49,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:49,545 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/8d72375dba2844ea891e6bdcf63d28b4 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/8d72375dba2844ea891e6bdcf63d28b4 2024-11-20T11:20:49,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742164_1340 (size=4469) 2024-11-20T11:20:49,550 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0470447603cf0ef7bd1ff47e79d9530d/B of 0470447603cf0ef7bd1ff47e79d9530d into 8d72375dba2844ea891e6bdcf63d28b4(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:20:49,550 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:49,550 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d., storeName=0470447603cf0ef7bd1ff47e79d9530d/B, priority=13, startTime=1732101649522; duration=0sec 2024-11-20T11:20:49,550 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:20:49,550 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0470447603cf0ef7bd1ff47e79d9530d:B 2024-11-20T11:20:49,550 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:49,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cadccb154d274125bf482d880e821eaf_0470447603cf0ef7bd1ff47e79d9530d is 50, key is test_row_0/A:col10/1732101648644/Put/seqid=0 2024-11-20T11:20:49,552 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:49,552 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 0470447603cf0ef7bd1ff47e79d9530d/C is initiating minor compaction (all files) 2024-11-20T11:20:49,552 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0470447603cf0ef7bd1ff47e79d9530d/C in TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
2024-11-20T11:20:49,552 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/ec79aa93c7ab471fba8ddf8aa9b7f044, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/35b86407f4bb46b883ba1bfa47242ffb, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/362eea827395459ab211d94a67a52452] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp, totalSize=35.5 K 2024-11-20T11:20:49,553 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting ec79aa93c7ab471fba8ddf8aa9b7f044, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732101646224 2024-11-20T11:20:49,553 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 35b86407f4bb46b883ba1bfa47242ffb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732101646358 2024-11-20T11:20:49,553 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 362eea827395459ab211d94a67a52452, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732101647503 2024-11-20T11:20:49,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742165_1341 (size=12304) 2024-11-20T11:20:49,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:20:49,562 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cadccb154d274125bf482d880e821eaf_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cadccb154d274125bf482d880e821eaf_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:49,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/eb4184a235694b529832b22988535ec7, store: [table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:49,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/eb4184a235694b529832b22988535ec7 is 175, key is test_row_0/A:col10/1732101648644/Put/seqid=0 2024-11-20T11:20:49,569 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0470447603cf0ef7bd1ff47e79d9530d#C#compaction#288 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:49,570 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/c5148b9e3adf4671a23dff7c0f5c4022 is 50, key is test_row_0/C:col10/1732101647503/Put/seqid=0 2024-11-20T11:20:49,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742166_1342 (size=31105) 2024-11-20T11:20:49,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742167_1343 (size=12459) 2024-11-20T11:20:49,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T11:20:49,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:49,772 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:49,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:49,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:49,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101709792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:49,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101709791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:49,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:49,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:49,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101709792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:49,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101709793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:49,806 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:49,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101709801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:49,905 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:49,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101709902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:49,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:49,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101709902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:49,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:49,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101709902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:49,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:49,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101709902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:49,910 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:49,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101709907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:49,947 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0470447603cf0ef7bd1ff47e79d9530d#A#compaction#286 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:49,948 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/102fc35f6feb438ba0a30f680c01b2d9 is 175, key is test_row_0/A:col10/1732101647503/Put/seqid=0 2024-11-20T11:20:49,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742168_1344 (size=31413) 2024-11-20T11:20:49,975 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=157, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/eb4184a235694b529832b22988535ec7 2024-11-20T11:20:49,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/b45bdea323b2478a9bc584fcd0f4ab81 is 50, key is test_row_0/B:col10/1732101648644/Put/seqid=0 2024-11-20T11:20:49,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742169_1345 (size=12151) 2024-11-20T11:20:49,990 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/c5148b9e3adf4671a23dff7c0f5c4022 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/c5148b9e3adf4671a23dff7c0f5c4022 2024-11-20T11:20:49,994 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0470447603cf0ef7bd1ff47e79d9530d/C of 0470447603cf0ef7bd1ff47e79d9530d into c5148b9e3adf4671a23dff7c0f5c4022(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:20:49,994 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:49,994 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d., storeName=0470447603cf0ef7bd1ff47e79d9530d/C, priority=13, startTime=1732101649522; duration=0sec 2024-11-20T11:20:49,994 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:49,994 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0470447603cf0ef7bd1ff47e79d9530d:C 2024-11-20T11:20:50,112 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:50,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101710107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:50,112 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:50,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101710107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:50,112 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:50,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101710108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:50,113 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:50,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101710108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:50,113 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:50,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101710112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:50,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T11:20:50,357 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/102fc35f6feb438ba0a30f680c01b2d9 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/102fc35f6feb438ba0a30f680c01b2d9 2024-11-20T11:20:50,362 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0470447603cf0ef7bd1ff47e79d9530d/A of 0470447603cf0ef7bd1ff47e79d9530d into 102fc35f6feb438ba0a30f680c01b2d9(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:20:50,362 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:50,362 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d., storeName=0470447603cf0ef7bd1ff47e79d9530d/A, priority=13, startTime=1732101649521; duration=0sec 2024-11-20T11:20:50,362 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:50,362 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0470447603cf0ef7bd1ff47e79d9530d:A 2024-11-20T11:20:50,390 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/b45bdea323b2478a9bc584fcd0f4ab81 2024-11-20T11:20:50,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/c629a2a5602a4a8f995f5eff749c46ae is 50, key is test_row_0/C:col10/1732101648644/Put/seqid=0 2024-11-20T11:20:50,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742170_1346 (size=12151) 2024-11-20T11:20:50,416 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:50,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101710413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:50,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:50,417 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:50,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101710413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:50,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101710414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:50,417 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:50,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101710414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:50,417 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:50,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101710415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:50,804 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/c629a2a5602a4a8f995f5eff749c46ae 2024-11-20T11:20:50,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/eb4184a235694b529832b22988535ec7 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/eb4184a235694b529832b22988535ec7 2024-11-20T11:20:50,812 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/eb4184a235694b529832b22988535ec7, entries=150, sequenceid=157, filesize=30.4 K 2024-11-20T11:20:50,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/b45bdea323b2478a9bc584fcd0f4ab81 as 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/b45bdea323b2478a9bc584fcd0f4ab81 2024-11-20T11:20:50,817 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/b45bdea323b2478a9bc584fcd0f4ab81, entries=150, sequenceid=157, filesize=11.9 K 2024-11-20T11:20:50,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/c629a2a5602a4a8f995f5eff749c46ae as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/c629a2a5602a4a8f995f5eff749c46ae 2024-11-20T11:20:50,822 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/c629a2a5602a4a8f995f5eff749c46ae, entries=150, sequenceid=157, filesize=11.9 K 2024-11-20T11:20:50,823 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 0470447603cf0ef7bd1ff47e79d9530d in 1282ms, sequenceid=157, compaction requested=false 2024-11-20T11:20:50,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:50,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
2024-11-20T11:20:50,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-11-20T11:20:50,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-11-20T11:20:50,825 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-11-20T11:20:50,825 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7430 sec 2024-11-20T11:20:50,826 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 1.7470 sec 2024-11-20T11:20:50,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:50,920 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0470447603cf0ef7bd1ff47e79d9530d 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-20T11:20:50,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=A 2024-11-20T11:20:50,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:50,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=B 2024-11-20T11:20:50,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:50,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=C 2024-11-20T11:20:50,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:50,927 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207c8063324dd64028aeef9ef3bad4c2ab_0470447603cf0ef7bd1ff47e79d9530d is 50, key is test_row_0/A:col10/1732101649800/Put/seqid=0 2024-11-20T11:20:50,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742171_1347 (size=14794) 2024-11-20T11:20:50,934 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:20:50,938 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207c8063324dd64028aeef9ef3bad4c2ab_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207c8063324dd64028aeef9ef3bad4c2ab_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:50,939 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/99bbe95482584fb8a840ce258f64e176, store: [table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:50,940 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/99bbe95482584fb8a840ce258f64e176 is 175, key is test_row_0/A:col10/1732101649800/Put/seqid=0 2024-11-20T11:20:50,940 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:50,940 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:50,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101710933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:50,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101710934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:50,942 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:50,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101710937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:50,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742172_1348 (size=39749) 2024-11-20T11:20:50,944 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=180, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/99bbe95482584fb8a840ce258f64e176 2024-11-20T11:20:50,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:50,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101710941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:50,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:50,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101710943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:50,949 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/f4f6319acb584a74b9b721384ac16f8a is 50, key is test_row_0/B:col10/1732101649800/Put/seqid=0 2024-11-20T11:20:50,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742173_1349 (size=12151) 2024-11-20T11:20:51,044 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:51,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101711042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:51,044 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:51,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101711042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:51,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:51,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101711043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:51,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:51,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101711050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:51,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:51,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101711050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:51,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T11:20:51,184 INFO [Thread-1395 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-11-20T11:20:51,185 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:20:51,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-11-20T11:20:51,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T11:20:51,187 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:20:51,187 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:20:51,188 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:20:51,248 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:51,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101711245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:51,250 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:51,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101711246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:51,250 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:51,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101711247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:51,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:51,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101711254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:51,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:51,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101711255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:51,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T11:20:51,338 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:51,338 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T11:20:51,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:51,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:51,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:51,339 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:20:51,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:51,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:51,354 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/f4f6319acb584a74b9b721384ac16f8a 2024-11-20T11:20:51,366 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/3d110159a0be4099afe00d0fbfd3b64a is 50, key is test_row_0/C:col10/1732101649800/Put/seqid=0 2024-11-20T11:20:51,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742174_1350 (size=12151) 2024-11-20T11:20:51,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T11:20:51,491 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:51,491 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T11:20:51,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:51,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:51,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:51,492 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:51,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:51,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:51,554 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:51,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101711551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:51,554 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:51,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101711551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:51,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:51,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101711552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:51,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:51,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101711559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:51,564 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:51,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101711561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:51,644 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:51,644 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T11:20:51,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:51,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:51,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:51,645 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:20:51,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:51,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:51,773 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/3d110159a0be4099afe00d0fbfd3b64a 2024-11-20T11:20:51,778 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/99bbe95482584fb8a840ce258f64e176 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/99bbe95482584fb8a840ce258f64e176 2024-11-20T11:20:51,781 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/99bbe95482584fb8a840ce258f64e176, entries=200, sequenceid=180, filesize=38.8 K 2024-11-20T11:20:51,782 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/f4f6319acb584a74b9b721384ac16f8a as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/f4f6319acb584a74b9b721384ac16f8a 2024-11-20T11:20:51,785 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/f4f6319acb584a74b9b721384ac16f8a, entries=150, sequenceid=180, filesize=11.9 K 2024-11-20T11:20:51,786 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/3d110159a0be4099afe00d0fbfd3b64a as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/3d110159a0be4099afe00d0fbfd3b64a 2024-11-20T11:20:51,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T11:20:51,789 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/3d110159a0be4099afe00d0fbfd3b64a, entries=150, sequenceid=180, filesize=11.9 K 2024-11-20T11:20:51,790 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 0470447603cf0ef7bd1ff47e79d9530d in 870ms, sequenceid=180, compaction requested=true 2024-11-20T11:20:51,790 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:51,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0470447603cf0ef7bd1ff47e79d9530d:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:20:51,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:51,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0470447603cf0ef7bd1ff47e79d9530d:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:20:51,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:51,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0470447603cf0ef7bd1ff47e79d9530d:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:20:51,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:20:51,790 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:51,790 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:51,791 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102267 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:51,791 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:51,791 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 0470447603cf0ef7bd1ff47e79d9530d/A is initiating minor compaction (all files) 2024-11-20T11:20:51,791 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 0470447603cf0ef7bd1ff47e79d9530d/B is initiating minor compaction (all files) 2024-11-20T11:20:51,791 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0470447603cf0ef7bd1ff47e79d9530d/B in TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:51,791 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0470447603cf0ef7bd1ff47e79d9530d/A in TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
2024-11-20T11:20:51,791 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/8d72375dba2844ea891e6bdcf63d28b4, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/b45bdea323b2478a9bc584fcd0f4ab81, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/f4f6319acb584a74b9b721384ac16f8a] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp, totalSize=35.9 K 2024-11-20T11:20:51,791 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/102fc35f6feb438ba0a30f680c01b2d9, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/eb4184a235694b529832b22988535ec7, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/99bbe95482584fb8a840ce258f64e176] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp, totalSize=99.9 K 2024-11-20T11:20:51,792 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:51,792 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
files: [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/102fc35f6feb438ba0a30f680c01b2d9, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/eb4184a235694b529832b22988535ec7, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/99bbe95482584fb8a840ce258f64e176] 2024-11-20T11:20:51,792 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 8d72375dba2844ea891e6bdcf63d28b4, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732101647503 2024-11-20T11:20:51,792 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 102fc35f6feb438ba0a30f680c01b2d9, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732101647503 2024-11-20T11:20:51,792 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting b45bdea323b2478a9bc584fcd0f4ab81, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732101648639 2024-11-20T11:20:51,792 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting f4f6319acb584a74b9b721384ac16f8a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1732101649792 2024-11-20T11:20:51,792 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting eb4184a235694b529832b22988535ec7, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732101648639 2024-11-20T11:20:51,793 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 99bbe95482584fb8a840ce258f64e176, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1732101649791 2024-11-20T11:20:51,796 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:51,797 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T11:20:51,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
2024-11-20T11:20:51,797 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing 0470447603cf0ef7bd1ff47e79d9530d 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-20T11:20:51,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=A 2024-11-20T11:20:51,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:51,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=B 2024-11-20T11:20:51,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:51,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=C 2024-11-20T11:20:51,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:51,801 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:51,801 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0470447603cf0ef7bd1ff47e79d9530d#B#compaction#294 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:51,802 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/2cea8c8160e24089ac2f6969e7f1ee19 is 50, key is test_row_0/B:col10/1732101649800/Put/seqid=0 2024-11-20T11:20:51,804 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411201968cbffa9184113823e6fd35834a41a_0470447603cf0ef7bd1ff47e79d9530d store=[table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:51,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a8f389f7b8e74853bec349bc73a680f5_0470447603cf0ef7bd1ff47e79d9530d is 50, key is test_row_0/A:col10/1732101650937/Put/seqid=0 2024-11-20T11:20:51,806 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411201968cbffa9184113823e6fd35834a41a_0470447603cf0ef7bd1ff47e79d9530d, store=[table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:51,806 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201968cbffa9184113823e6fd35834a41a_0470447603cf0ef7bd1ff47e79d9530d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:51,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742175_1351 (size=4469) 2024-11-20T11:20:51,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742176_1352 (size=12561) 2024-11-20T11:20:51,824 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/2cea8c8160e24089ac2f6969e7f1ee19 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/2cea8c8160e24089ac2f6969e7f1ee19 2024-11-20T11:20:51,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742177_1353 (size=12304) 2024-11-20T11:20:51,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:20:51,829 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0470447603cf0ef7bd1ff47e79d9530d/B of 0470447603cf0ef7bd1ff47e79d9530d into 
2cea8c8160e24089ac2f6969e7f1ee19(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:20:51,829 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:51,829 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d., storeName=0470447603cf0ef7bd1ff47e79d9530d/B, priority=13, startTime=1732101651790; duration=0sec 2024-11-20T11:20:51,829 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:20:51,829 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0470447603cf0ef7bd1ff47e79d9530d:B 2024-11-20T11:20:51,829 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:51,831 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a8f389f7b8e74853bec349bc73a680f5_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a8f389f7b8e74853bec349bc73a680f5_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:51,831 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:51,831 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 0470447603cf0ef7bd1ff47e79d9530d/C is initiating minor compaction (all files) 2024-11-20T11:20:51,831 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0470447603cf0ef7bd1ff47e79d9530d/C in TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
2024-11-20T11:20:51,831 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/c5148b9e3adf4671a23dff7c0f5c4022, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/c629a2a5602a4a8f995f5eff749c46ae, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/3d110159a0be4099afe00d0fbfd3b64a] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp, totalSize=35.9 K 2024-11-20T11:20:51,831 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting c5148b9e3adf4671a23dff7c0f5c4022, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732101647503 2024-11-20T11:20:51,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/874307cb626d493ba71f26cbbf3f31a9, store: [table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:51,832 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting c629a2a5602a4a8f995f5eff749c46ae, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732101648639 2024-11-20T11:20:51,832 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d110159a0be4099afe00d0fbfd3b64a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1732101649792 2024-11-20T11:20:51,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/874307cb626d493ba71f26cbbf3f31a9 is 175, key is test_row_0/A:col10/1732101650937/Put/seqid=0 2024-11-20T11:20:51,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742178_1354 (size=31105) 2024-11-20T11:20:51,840 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0470447603cf0ef7bd1ff47e79d9530d#C#compaction#297 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:51,840 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/abaf854956ff410387f756f0017caf5f is 50, key is test_row_0/C:col10/1732101649800/Put/seqid=0 2024-11-20T11:20:51,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742179_1355 (size=12561) 2024-11-20T11:20:52,058 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:52,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:52,083 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:52,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101712078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:52,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:52,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101712079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:52,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:52,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101712079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:52,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:52,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101712079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:52,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:52,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101712082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:52,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:52,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101712185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:52,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:52,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101712185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:52,188 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:52,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101712185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:52,188 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:52,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101712185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:52,188 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:52,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101712186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:52,212 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0470447603cf0ef7bd1ff47e79d9530d#A#compaction#295 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:52,212 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/26461cf64a9f44ba856eea6d557252f7 is 175, key is test_row_0/A:col10/1732101649800/Put/seqid=0 2024-11-20T11:20:52,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742180_1356 (size=31515) 2024-11-20T11:20:52,238 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=197, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/874307cb626d493ba71f26cbbf3f31a9 2024-11-20T11:20:52,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/75e94e8862754d0bbdef8d339d535622 is 50, key is test_row_0/B:col10/1732101650937/Put/seqid=0 2024-11-20T11:20:52,251 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/abaf854956ff410387f756f0017caf5f as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/abaf854956ff410387f756f0017caf5f 2024-11-20T11:20:52,256 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0470447603cf0ef7bd1ff47e79d9530d/C of 0470447603cf0ef7bd1ff47e79d9530d into abaf854956ff410387f756f0017caf5f(size=12.3 K), total size for store is 12.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:20:52,256 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:52,256 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d., storeName=0470447603cf0ef7bd1ff47e79d9530d/C, priority=13, startTime=1732101651790; duration=0sec 2024-11-20T11:20:52,256 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:52,256 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0470447603cf0ef7bd1ff47e79d9530d:C 2024-11-20T11:20:52,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742181_1357 (size=12151) 2024-11-20T11:20:52,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T11:20:52,390 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:52,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101712389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:52,393 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:52,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101712389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:52,393 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:52,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101712389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:52,393 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:52,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101712390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:52,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:52,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101712390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:52,621 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/26461cf64a9f44ba856eea6d557252f7 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/26461cf64a9f44ba856eea6d557252f7 2024-11-20T11:20:52,625 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0470447603cf0ef7bd1ff47e79d9530d/A of 0470447603cf0ef7bd1ff47e79d9530d into 26461cf64a9f44ba856eea6d557252f7(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:20:52,626 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:52,626 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d., storeName=0470447603cf0ef7bd1ff47e79d9530d/A, priority=13, startTime=1732101651790; duration=0sec 2024-11-20T11:20:52,626 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:52,626 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0470447603cf0ef7bd1ff47e79d9530d:A 2024-11-20T11:20:52,663 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/75e94e8862754d0bbdef8d339d535622 2024-11-20T11:20:52,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/c7c202ac432a4ebcab07e37eafa8b809 is 50, key is test_row_0/C:col10/1732101650937/Put/seqid=0 2024-11-20T11:20:52,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742182_1358 (size=12151) 2024-11-20T11:20:52,692 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/c7c202ac432a4ebcab07e37eafa8b809 2024-11-20T11:20:52,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:52,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101712691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:52,696 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:52,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101712694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:52,696 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:52,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101712694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:52,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/874307cb626d493ba71f26cbbf3f31a9 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/874307cb626d493ba71f26cbbf3f31a9 2024-11-20T11:20:52,699 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:52,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101712696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:52,699 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:52,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101712696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:52,702 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/874307cb626d493ba71f26cbbf3f31a9, entries=150, sequenceid=197, filesize=30.4 K 2024-11-20T11:20:52,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/75e94e8862754d0bbdef8d339d535622 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/75e94e8862754d0bbdef8d339d535622 2024-11-20T11:20:52,706 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/75e94e8862754d0bbdef8d339d535622, entries=150, sequenceid=197, filesize=11.9 K 2024-11-20T11:20:52,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/c7c202ac432a4ebcab07e37eafa8b809 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/c7c202ac432a4ebcab07e37eafa8b809 2024-11-20T11:20:52,712 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/c7c202ac432a4ebcab07e37eafa8b809, entries=150, sequenceid=197, filesize=11.9 K 2024-11-20T11:20:52,712 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 0470447603cf0ef7bd1ff47e79d9530d in 915ms, 
sequenceid=197, compaction requested=false 2024-11-20T11:20:52,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:52,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:52,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-11-20T11:20:52,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-11-20T11:20:52,716 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-11-20T11:20:52,716 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5280 sec 2024-11-20T11:20:52,717 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 1.5310 sec 2024-11-20T11:20:53,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:53,202 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0470447603cf0ef7bd1ff47e79d9530d 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-20T11:20:53,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=A 2024-11-20T11:20:53,204 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:53,204 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=B 2024-11-20T11:20:53,204 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:53,204 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=C 2024-11-20T11:20:53,204 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:53,210 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cd139b4a459746218f1f9972b64dd59e_0470447603cf0ef7bd1ff47e79d9530d is 50, key is test_row_0/A:col10/1732101652076/Put/seqid=0 2024-11-20T11:20:53,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742183_1359 (size=14794) 2024-11-20T11:20:53,217 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:53,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101713212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:53,221 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:53,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101713215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:53,221 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:53,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101713215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:53,222 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:53,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101713216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:53,223 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:53,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101713217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:53,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T11:20:53,290 INFO [Thread-1395 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-11-20T11:20:53,291 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:20:53,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees 2024-11-20T11:20:53,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T11:20:53,293 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:20:53,293 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:20:53,294 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:20:53,321 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:53,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101713318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:53,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:53,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101713322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:53,329 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:53,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101713322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:53,329 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:53,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101713323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:53,329 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:53,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101713324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:53,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T11:20:53,445 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:53,445 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T11:20:53,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:53,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:53,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:53,446 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:53,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:53,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:53,525 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:53,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101713523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:53,532 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:53,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101713529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:53,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:53,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101713530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:53,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:53,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101713531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:53,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:53,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101713531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:53,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T11:20:53,598 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:53,598 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T11:20:53,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:53,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:53,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:53,599 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:53,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:53,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:53,614 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:20:53,618 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cd139b4a459746218f1f9972b64dd59e_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cd139b4a459746218f1f9972b64dd59e_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:53,619 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/1a936cce6de341138f77c9b9c2fcf328, store: [table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:53,620 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/1a936cce6de341138f77c9b9c2fcf328 is 175, key is test_row_0/A:col10/1732101652076/Put/seqid=0 2024-11-20T11:20:53,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742184_1360 (size=39749) 2024-11-20T11:20:53,751 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:53,751 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T11:20:53,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:53,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:53,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:53,752 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:53,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:20:53,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:53,832 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:53,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101713829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:53,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:53,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101713833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:53,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:53,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101713834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:53,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:53,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101713834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:53,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:53,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101713835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:53,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T11:20:53,904 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:53,904 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T11:20:53,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:53,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:53,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:53,905 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:53,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:53,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:54,025 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=222, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/1a936cce6de341138f77c9b9c2fcf328 2024-11-20T11:20:54,032 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/95c98c4449d648f5bed4703e39a827b2 is 50, key is test_row_0/B:col10/1732101652076/Put/seqid=0 2024-11-20T11:20:54,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742185_1361 (size=12151) 2024-11-20T11:20:54,056 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:54,057 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T11:20:54,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:54,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
as already flushing 2024-11-20T11:20:54,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:54,057 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:54,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:54,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:54,209 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:54,210 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T11:20:54,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:54,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:54,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:54,210 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:54,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:54,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:54,339 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:54,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101714337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:54,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:54,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101714338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:54,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:54,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101714338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:54,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:54,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101714338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:54,342 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:54,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101714339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:54,362 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:54,362 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T11:20:54,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:54,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:54,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:54,363 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:54,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:54,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:54,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T11:20:54,440 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=222 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/95c98c4449d648f5bed4703e39a827b2 2024-11-20T11:20:54,447 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/6b58a701886c4a3d81cf0a996590bcc2 is 50, key is test_row_0/C:col10/1732101652076/Put/seqid=0 2024-11-20T11:20:54,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742186_1362 (size=12151) 2024-11-20T11:20:54,514 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:54,515 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T11:20:54,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:54,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
as already flushing 2024-11-20T11:20:54,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:54,515 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:54,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:54,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:54,667 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:54,668 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T11:20:54,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:54,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:54,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:54,668 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:54,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:54,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:54,820 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:54,820 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T11:20:54,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:54,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:54,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:54,820 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:54,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:54,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:54,852 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=222 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/6b58a701886c4a3d81cf0a996590bcc2 2024-11-20T11:20:54,856 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/1a936cce6de341138f77c9b9c2fcf328 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/1a936cce6de341138f77c9b9c2fcf328 2024-11-20T11:20:54,860 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/1a936cce6de341138f77c9b9c2fcf328, entries=200, sequenceid=222, filesize=38.8 K 2024-11-20T11:20:54,861 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/95c98c4449d648f5bed4703e39a827b2 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/95c98c4449d648f5bed4703e39a827b2 2024-11-20T11:20:54,864 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/95c98c4449d648f5bed4703e39a827b2, entries=150, 
sequenceid=222, filesize=11.9 K 2024-11-20T11:20:54,865 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/6b58a701886c4a3d81cf0a996590bcc2 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/6b58a701886c4a3d81cf0a996590bcc2 2024-11-20T11:20:54,868 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/6b58a701886c4a3d81cf0a996590bcc2, entries=150, sequenceid=222, filesize=11.9 K 2024-11-20T11:20:54,869 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 0470447603cf0ef7bd1ff47e79d9530d in 1667ms, sequenceid=222, compaction requested=true 2024-11-20T11:20:54,869 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:54,870 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:54,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0470447603cf0ef7bd1ff47e79d9530d:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:20:54,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:54,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0470447603cf0ef7bd1ff47e79d9530d:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:20:54,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:54,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0470447603cf0ef7bd1ff47e79d9530d:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:20:54,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:20:54,870 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:54,870 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102369 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:54,871 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 0470447603cf0ef7bd1ff47e79d9530d/A is initiating minor compaction (all files) 2024-11-20T11:20:54,871 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0470447603cf0ef7bd1ff47e79d9530d/A in 
TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:54,871 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:54,871 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 0470447603cf0ef7bd1ff47e79d9530d/B is initiating minor compaction (all files) 2024-11-20T11:20:54,871 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/26461cf64a9f44ba856eea6d557252f7, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/874307cb626d493ba71f26cbbf3f31a9, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/1a936cce6de341138f77c9b9c2fcf328] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp, totalSize=100.0 K 2024-11-20T11:20:54,871 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0470447603cf0ef7bd1ff47e79d9530d/B in TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:54,871 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:54,871 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
files: [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/26461cf64a9f44ba856eea6d557252f7, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/874307cb626d493ba71f26cbbf3f31a9, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/1a936cce6de341138f77c9b9c2fcf328] 2024-11-20T11:20:54,871 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/2cea8c8160e24089ac2f6969e7f1ee19, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/75e94e8862754d0bbdef8d339d535622, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/95c98c4449d648f5bed4703e39a827b2] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp, totalSize=36.0 K 2024-11-20T11:20:54,871 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 2cea8c8160e24089ac2f6969e7f1ee19, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1732101649792 2024-11-20T11:20:54,871 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 26461cf64a9f44ba856eea6d557252f7, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1732101649792 2024-11-20T11:20:54,872 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 874307cb626d493ba71f26cbbf3f31a9, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732101650931 2024-11-20T11:20:54,872 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 75e94e8862754d0bbdef8d339d535622, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732101650931 2024-11-20T11:20:54,872 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 95c98c4449d648f5bed4703e39a827b2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1732101652076 2024-11-20T11:20:54,872 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1a936cce6de341138f77c9b9c2fcf328, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1732101652076 2024-11-20T11:20:54,881 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:54,882 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0470447603cf0ef7bd1ff47e79d9530d#B#compaction#303 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:54,883 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112023297e9b8a3547938f50cb68bc687f81_0470447603cf0ef7bd1ff47e79d9530d store=[table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:54,883 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/5a34e3705e5b4eb68d814c8927da1bba is 50, key is test_row_0/B:col10/1732101652076/Put/seqid=0 2024-11-20T11:20:54,884 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112023297e9b8a3547938f50cb68bc687f81_0470447603cf0ef7bd1ff47e79d9530d, store=[table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:54,885 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112023297e9b8a3547938f50cb68bc687f81_0470447603cf0ef7bd1ff47e79d9530d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:54,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742187_1363 (size=12663) 2024-11-20T11:20:54,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742188_1364 (size=4469) 2024-11-20T11:20:54,972 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:54,973 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T11:20:54,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
2024-11-20T11:20:54,973 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2837): Flushing 0470447603cf0ef7bd1ff47e79d9530d 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T11:20:54,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=A 2024-11-20T11:20:54,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:54,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=B 2024-11-20T11:20:54,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:54,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=C 2024-11-20T11:20:54,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:54,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120049f99f687464b71b4342dee88495feb_0470447603cf0ef7bd1ff47e79d9530d is 50, key is test_row_0/A:col10/1732101653213/Put/seqid=0 2024-11-20T11:20:54,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742189_1365 (size=12304) 2024-11-20T11:20:55,291 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0470447603cf0ef7bd1ff47e79d9530d#A#compaction#304 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:55,292 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/128b18045ccc4b83bb8a680d7d94156b is 175, key is test_row_0/A:col10/1732101652076/Put/seqid=0 2024-11-20T11:20:55,294 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/5a34e3705e5b4eb68d814c8927da1bba as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/5a34e3705e5b4eb68d814c8927da1bba 2024-11-20T11:20:55,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742190_1366 (size=31617) 2024-11-20T11:20:55,300 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0470447603cf0ef7bd1ff47e79d9530d/B of 0470447603cf0ef7bd1ff47e79d9530d into 5a34e3705e5b4eb68d814c8927da1bba(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:20:55,300 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:55,300 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d., storeName=0470447603cf0ef7bd1ff47e79d9530d/B, priority=13, startTime=1732101654870; duration=0sec 2024-11-20T11:20:55,301 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:20:55,301 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0470447603cf0ef7bd1ff47e79d9530d:B 2024-11-20T11:20:55,301 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:55,302 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/128b18045ccc4b83bb8a680d7d94156b as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/128b18045ccc4b83bb8a680d7d94156b 2024-11-20T11:20:55,302 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:55,302 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 0470447603cf0ef7bd1ff47e79d9530d/C is initiating minor compaction (all files) 2024-11-20T11:20:55,302 INFO 
[RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0470447603cf0ef7bd1ff47e79d9530d/C in TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:55,302 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/abaf854956ff410387f756f0017caf5f, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/c7c202ac432a4ebcab07e37eafa8b809, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/6b58a701886c4a3d81cf0a996590bcc2] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp, totalSize=36.0 K 2024-11-20T11:20:55,303 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting abaf854956ff410387f756f0017caf5f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1732101649792 2024-11-20T11:20:55,303 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting c7c202ac432a4ebcab07e37eafa8b809, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732101650931 2024-11-20T11:20:55,304 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b58a701886c4a3d81cf0a996590bcc2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1732101652076 2024-11-20T11:20:55,308 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0470447603cf0ef7bd1ff47e79d9530d/A of 0470447603cf0ef7bd1ff47e79d9530d into 128b18045ccc4b83bb8a680d7d94156b(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:20:55,308 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:55,308 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d., storeName=0470447603cf0ef7bd1ff47e79d9530d/A, priority=13, startTime=1732101654869; duration=0sec 2024-11-20T11:20:55,308 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:55,308 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0470447603cf0ef7bd1ff47e79d9530d:A 2024-11-20T11:20:55,312 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0470447603cf0ef7bd1ff47e79d9530d#C#compaction#306 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:55,313 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/1f9d9aaffd324b739cff327bb8e47026 is 50, key is test_row_0/C:col10/1732101652076/Put/seqid=0 2024-11-20T11:20:55,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742191_1367 (size=12663) 2024-11-20T11:20:55,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:55,349 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:55,371 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:55,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101715366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:55,371 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:55,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101715367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:55,372 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:55,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101715368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:55,372 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:55,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101715368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:55,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:55,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101715369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:55,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:20:55,388 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120049f99f687464b71b4342dee88495feb_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120049f99f687464b71b4342dee88495feb_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:55,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/0c9d8bec0dae452da90637ed3930f983, store: [table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:55,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/0c9d8bec0dae452da90637ed3930f983 is 175, key is test_row_0/A:col10/1732101653213/Put/seqid=0 2024-11-20T11:20:55,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T11:20:55,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742192_1368 (size=31105) 2024-11-20T11:20:55,397 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=236, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/0c9d8bec0dae452da90637ed3930f983 
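The repeated RegionTooBusyException entries above record writers being rejected while region 0470447603cf0ef7bd1ff47e79d9530d is over its 512.0 K memstore blocking limit and the requested flush is still draining it. As an aside that is not part of this log, the sketch below illustrates one way a caller could back off and retry such rejected puts. The row, family, qualifier, and table name are taken from the log; the backoff values and retry count are illustrative assumptions, and in practice the standard HBase client already retries RegionTooBusyException internally, so the exception may reach application code only wrapped after retries are exhausted.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;                      // illustrative starting backoff, not from the log
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);                    // may be rejected while the region is over its memstore limit
                    break;                             // write accepted once the flush has drained the memstore
                } catch (RegionTooBusyException e) {   // assumes the exception surfaces directly; client retries may wrap it
                    Thread.sleep(backoffMs);           // wait for the in-flight flush before retrying
                    backoffMs *= 2;                    // simple exponential backoff
                }
            }
        }
    }
}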
2024-11-20T11:20:55,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/0f1cc89c879a4a18bc82498260dd6dc6 is 50, key is test_row_0/B:col10/1732101653213/Put/seqid=0 2024-11-20T11:20:55,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742193_1369 (size=12151) 2024-11-20T11:20:55,408 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/0f1cc89c879a4a18bc82498260dd6dc6 2024-11-20T11:20:55,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/a59eb25824364b338876e086efa80397 is 50, key is test_row_0/C:col10/1732101653213/Put/seqid=0 2024-11-20T11:20:55,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742194_1370 (size=12151) 2024-11-20T11:20:55,475 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:55,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101715472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:55,475 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:55,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101715472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:55,476 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:55,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101715473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:55,477 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:55,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101715473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:55,477 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:55,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101715473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:55,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:55,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101715676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:55,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:55,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101715676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:55,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:55,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101715678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:55,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:55,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101715678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:55,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:55,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101715679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:55,722 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/1f9d9aaffd324b739cff327bb8e47026 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/1f9d9aaffd324b739cff327bb8e47026 2024-11-20T11:20:55,726 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0470447603cf0ef7bd1ff47e79d9530d/C of 0470447603cf0ef7bd1ff47e79d9530d into 1f9d9aaffd324b739cff327bb8e47026(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
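At this point stores A, B and C of 0470447603cf0ef7bd1ff47e79d9530d have each had their three eligible files rewritten into a single compacted file. Purely as an illustration of the Admin API involved, and not something this test log itself performs, the sketch below requests a compaction for the table and polls its compaction state; the polling interval and loop bound are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionStateExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.compact(table);                          // queue a minor compaction for every store of the table
            for (int i = 0; i < 30; i++) {                 // poll for up to ~30s; bound is an assumption
                CompactionState state = admin.getCompactionState(table);
                if (state == CompactionState.NONE) {       // all queued compactions have completed
                    break;
                }
                Thread.sleep(1000);
            }
        }
    }
}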
2024-11-20T11:20:55,726 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:55,726 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d., storeName=0470447603cf0ef7bd1ff47e79d9530d/C, priority=13, startTime=1732101654870; duration=0sec 2024-11-20T11:20:55,726 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:55,727 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0470447603cf0ef7bd1ff47e79d9530d:C 2024-11-20T11:20:55,818 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/a59eb25824364b338876e086efa80397 2024-11-20T11:20:55,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/0c9d8bec0dae452da90637ed3930f983 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/0c9d8bec0dae452da90637ed3930f983 2024-11-20T11:20:55,825 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/0c9d8bec0dae452da90637ed3930f983, entries=150, sequenceid=236, filesize=30.4 K 2024-11-20T11:20:55,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/0f1cc89c879a4a18bc82498260dd6dc6 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/0f1cc89c879a4a18bc82498260dd6dc6 2024-11-20T11:20:55,829 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/0f1cc89c879a4a18bc82498260dd6dc6, entries=150, sequenceid=236, filesize=11.9 K 2024-11-20T11:20:55,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/a59eb25824364b338876e086efa80397 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/a59eb25824364b338876e086efa80397 2024-11-20T11:20:55,834 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/a59eb25824364b338876e086efa80397, entries=150, sequenceid=236, filesize=11.9 K 2024-11-20T11:20:55,835 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 0470447603cf0ef7bd1ff47e79d9530d in 862ms, sequenceid=236, compaction requested=false 2024-11-20T11:20:55,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2538): Flush status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:55,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:55,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-11-20T11:20:55,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=121 2024-11-20T11:20:55,838 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-11-20T11:20:55,838 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5430 sec 2024-11-20T11:20:55,839 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees in 2.5460 sec 2024-11-20T11:20:55,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:55,981 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0470447603cf0ef7bd1ff47e79d9530d 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T11:20:55,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=A 2024-11-20T11:20:55,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:55,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=B 2024-11-20T11:20:55,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:55,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
0470447603cf0ef7bd1ff47e79d9530d, store=C 2024-11-20T11:20:55,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:55,990 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a5c6b8e879a04eeeafde52287b9cbb00_0470447603cf0ef7bd1ff47e79d9530d is 50, key is test_row_0/A:col10/1732101655361/Put/seqid=0 2024-11-20T11:20:55,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742195_1371 (size=14944) 2024-11-20T11:20:55,996 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:20:55,999 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a5c6b8e879a04eeeafde52287b9cbb00_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a5c6b8e879a04eeeafde52287b9cbb00_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:56,000 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/9162eb0652a34a18b7fa3b4d47192ecc, store: [table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:56,001 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/9162eb0652a34a18b7fa3b4d47192ecc is 175, key is test_row_0/A:col10/1732101655361/Put/seqid=0 2024-11-20T11:20:56,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742196_1372 (size=39899) 2024-11-20T11:20:56,023 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101715990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:56,027 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101716001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:56,028 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101716024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:56,028 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101716024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:56,028 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101716024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:56,128 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101716125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:56,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101716129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:56,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101716129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:56,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101716129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:56,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101716129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:56,333 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101716330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:56,337 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101716334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:56,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101716334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:56,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101716334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:56,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101716335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:56,405 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=262, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/9162eb0652a34a18b7fa3b4d47192ecc 2024-11-20T11:20:56,414 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/6b0a57e068164ec6919e37d3aaa235f8 is 50, key is test_row_0/B:col10/1732101655361/Put/seqid=0 2024-11-20T11:20:56,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742197_1373 (size=12251) 2024-11-20T11:20:56,420 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/6b0a57e068164ec6919e37d3aaa235f8 2024-11-20T11:20:56,426 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/618b40850f154e01993c6340a7dc6b05 is 50, key is test_row_0/C:col10/1732101655361/Put/seqid=0 2024-11-20T11:20:56,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742198_1374 (size=12251) 2024-11-20T11:20:56,431 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/618b40850f154e01993c6340a7dc6b05 2024-11-20T11:20:56,435 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/9162eb0652a34a18b7fa3b4d47192ecc as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/9162eb0652a34a18b7fa3b4d47192ecc 2024-11-20T11:20:56,438 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/9162eb0652a34a18b7fa3b4d47192ecc, entries=200, sequenceid=262, filesize=39.0 K 2024-11-20T11:20:56,439 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/6b0a57e068164ec6919e37d3aaa235f8 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/6b0a57e068164ec6919e37d3aaa235f8 2024-11-20T11:20:56,442 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/6b0a57e068164ec6919e37d3aaa235f8, entries=150, sequenceid=262, filesize=12.0 K 2024-11-20T11:20:56,443 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/618b40850f154e01993c6340a7dc6b05 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/618b40850f154e01993c6340a7dc6b05 2024-11-20T11:20:56,447 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/618b40850f154e01993c6340a7dc6b05, entries=150, sequenceid=262, filesize=12.0 K 2024-11-20T11:20:56,447 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 0470447603cf0ef7bd1ff47e79d9530d in 466ms, sequenceid=262, compaction requested=true 2024-11-20T11:20:56,448 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:56,448 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:56,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0470447603cf0ef7bd1ff47e79d9530d:A, priority=-2147483648, current under compaction store size is 1 
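[Editorial note] The repeated "RegionTooBusyException: Over memstore limit=512.0 K" records above mean the region's memstore has grown past its blocking threshold, so HRegion.checkResources rejects further Mutate calls until the flush just logged ("Finished flush of dataSize ~134.18 KB ... compaction requested=true") drains it. In stock HBase the blocking threshold is the per-region flush size multiplied by a block multiplier; a 512 K limit suggests this test run shrinks the flush size far below the defaults. A minimal sketch, assuming the standard configuration keys, of how such a small limit could be produced (the concrete 128 KB / 4x values are illustrative assumptions, not read from this log):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches 128 KB (the stock default is 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Block new writes once the memstore reaches 4x the flush size (512 KB here),
        // which is the point at which HRegion.checkResources throws RegionTooBusyException.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingBytes = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Blocking threshold = " + blockingBytes + " bytes");
    }
}
```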
2024-11-20T11:20:56,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:56,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0470447603cf0ef7bd1ff47e79d9530d:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:20:56,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:56,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0470447603cf0ef7bd1ff47e79d9530d:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:20:56,448 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:56,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:20:56,449 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102621 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:56,449 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37065 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:56,449 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 0470447603cf0ef7bd1ff47e79d9530d/A is initiating minor compaction (all files) 2024-11-20T11:20:56,449 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 0470447603cf0ef7bd1ff47e79d9530d/B is initiating minor compaction (all files) 2024-11-20T11:20:56,449 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0470447603cf0ef7bd1ff47e79d9530d/A in TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:56,449 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0470447603cf0ef7bd1ff47e79d9530d/B in TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
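[Editorial note] The flush leaves three store files in each of A, B and C, so the flusher immediately queues minor compactions ("Add compact mark for store ...:A/B/C") and the ExploringCompactionPolicy selects all three eligible files per store. In this log the requests come from the system ("Because: MemStoreFlusher.0"); the same thing can be requested explicitly through the client Admin API. A minimal sketch under that assumption, with the connection boilerplate added for completeness (it is not part of this log):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestCompaction {
    public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            admin.flush(table);    // force memstores out to store files, as MemStoreFlusher.0 does here
            admin.compact(table);  // queue a minor compaction of the resulting files
            // Poll until the region servers report no compaction in progress for the table.
            while (admin.getCompactionState(table) != CompactionState.NONE) {
                Thread.sleep(100);
            }
        }
    }
}
```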
2024-11-20T11:20:56,449 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/128b18045ccc4b83bb8a680d7d94156b, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/0c9d8bec0dae452da90637ed3930f983, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/9162eb0652a34a18b7fa3b4d47192ecc] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp, totalSize=100.2 K 2024-11-20T11:20:56,449 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:56,449 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/5a34e3705e5b4eb68d814c8927da1bba, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/0f1cc89c879a4a18bc82498260dd6dc6, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/6b0a57e068164ec6919e37d3aaa235f8] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp, totalSize=36.2 K 2024-11-20T11:20:56,449 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
files: [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/128b18045ccc4b83bb8a680d7d94156b, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/0c9d8bec0dae452da90637ed3930f983, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/9162eb0652a34a18b7fa3b4d47192ecc] 2024-11-20T11:20:56,450 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 128b18045ccc4b83bb8a680d7d94156b, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1732101652076 2024-11-20T11:20:56,450 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a34e3705e5b4eb68d814c8927da1bba, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1732101652076 2024-11-20T11:20:56,450 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0c9d8bec0dae452da90637ed3930f983, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732101653213 2024-11-20T11:20:56,450 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 0f1cc89c879a4a18bc82498260dd6dc6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732101653213 2024-11-20T11:20:56,451 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9162eb0652a34a18b7fa3b4d47192ecc, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732101655361 2024-11-20T11:20:56,451 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b0a57e068164ec6919e37d3aaa235f8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732101655361 2024-11-20T11:20:56,458 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0470447603cf0ef7bd1ff47e79d9530d#B#compaction#312 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:56,458 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/7e003fbce7c3410aad499a81f2c6efd5 is 50, key is test_row_0/B:col10/1732101655361/Put/seqid=0 2024-11-20T11:20:56,461 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:56,464 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411205f4b6d38c6cf4f238a6aca28db32a811_0470447603cf0ef7bd1ff47e79d9530d store=[table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:56,466 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411205f4b6d38c6cf4f238a6aca28db32a811_0470447603cf0ef7bd1ff47e79d9530d, store=[table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:56,466 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205f4b6d38c6cf4f238a6aca28db32a811_0470447603cf0ef7bd1ff47e79d9530d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:56,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742199_1375 (size=12865) 2024-11-20T11:20:56,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742200_1376 (size=4469) 2024-11-20T11:20:56,473 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0470447603cf0ef7bd1ff47e79d9530d#A#compaction#313 average throughput is 2.04 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:56,474 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/29953fd1948b4f9fb990e3ae5cb45655 is 175, key is test_row_0/A:col10/1732101655361/Put/seqid=0 2024-11-20T11:20:56,478 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/7e003fbce7c3410aad499a81f2c6efd5 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/7e003fbce7c3410aad499a81f2c6efd5 2024-11-20T11:20:56,482 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0470447603cf0ef7bd1ff47e79d9530d/B of 0470447603cf0ef7bd1ff47e79d9530d into 7e003fbce7c3410aad499a81f2c6efd5(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:20:56,482 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:56,482 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d., storeName=0470447603cf0ef7bd1ff47e79d9530d/B, priority=13, startTime=1732101656448; duration=0sec 2024-11-20T11:20:56,483 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:20:56,483 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0470447603cf0ef7bd1ff47e79d9530d:B 2024-11-20T11:20:56,483 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:20:56,484 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37065 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:20:56,484 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 0470447603cf0ef7bd1ff47e79d9530d/C is initiating minor compaction (all files) 2024-11-20T11:20:56,484 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0470447603cf0ef7bd1ff47e79d9530d/C in TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
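[Editorial note] The DefaultMobStoreFlusher and DefaultMobStoreCompactor records show that family A is MOB-enabled: the compactor creates a MOB writer and then aborts it "because there are no MOB cells", presumably because the small test cells (the biggest cells logged here are 50-175 bytes) stay below the MOB threshold. A minimal sketch of how a MOB family like this might be declared at table-creation time; the 100 KB threshold is an illustrative assumption, not a value taken from this log:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
    public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)          // values above the threshold go to separate MOB files
                .setMobThreshold(100 * 1024L) // 100 KB; tiny test cells remain in ordinary HFiles
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
            .build();
    }
}
```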
2024-11-20T11:20:56,484 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/1f9d9aaffd324b739cff327bb8e47026, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/a59eb25824364b338876e086efa80397, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/618b40850f154e01993c6340a7dc6b05] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp, totalSize=36.2 K 2024-11-20T11:20:56,484 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f9d9aaffd324b739cff327bb8e47026, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1732101652076 2024-11-20T11:20:56,485 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting a59eb25824364b338876e086efa80397, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732101653213 2024-11-20T11:20:56,485 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 618b40850f154e01993c6340a7dc6b05, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732101655361 2024-11-20T11:20:56,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742201_1377 (size=31819) 2024-11-20T11:20:56,492 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0470447603cf0ef7bd1ff47e79d9530d#C#compaction#314 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:20:56,493 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/a9dcbea694b94954b740f4e400e5cb45 is 50, key is test_row_0/C:col10/1732101655361/Put/seqid=0 2024-11-20T11:20:56,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742202_1378 (size=12865) 2024-11-20T11:20:56,508 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/29953fd1948b4f9fb990e3ae5cb45655 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/29953fd1948b4f9fb990e3ae5cb45655 2024-11-20T11:20:56,512 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0470447603cf0ef7bd1ff47e79d9530d/A of 0470447603cf0ef7bd1ff47e79d9530d into 29953fd1948b4f9fb990e3ae5cb45655(size=31.1 K), total size for store is 31.1 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:20:56,512 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:56,512 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d., storeName=0470447603cf0ef7bd1ff47e79d9530d/A, priority=13, startTime=1732101656448; duration=0sec 2024-11-20T11:20:56,512 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:56,512 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0470447603cf0ef7bd1ff47e79d9530d:A 2024-11-20T11:20:56,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:56,636 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0470447603cf0ef7bd1ff47e79d9530d 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T11:20:56,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=A 2024-11-20T11:20:56,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:56,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=B 2024-11-20T11:20:56,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:56,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=C 2024-11-20T11:20:56,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:56,644 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112018f49f83f4e34cbda1e388be1c922a02_0470447603cf0ef7bd1ff47e79d9530d is 50, key is test_row_0/A:col10/1732101655992/Put/seqid=0 2024-11-20T11:20:56,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742203_1379 (size=14994) 2024-11-20T11:20:56,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101716657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:56,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101716657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:56,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101716658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:56,663 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101716659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:56,663 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101716661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:56,765 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101716763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:56,766 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101716763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:56,766 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101716763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:56,766 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101716764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:56,766 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101716764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:56,902 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/a9dcbea694b94954b740f4e400e5cb45 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/a9dcbea694b94954b740f4e400e5cb45 2024-11-20T11:20:56,906 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0470447603cf0ef7bd1ff47e79d9530d/C of 0470447603cf0ef7bd1ff47e79d9530d into a9dcbea694b94954b740f4e400e5cb45(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
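[Editorial note] With all three compactions finished, the handlers are still rejecting Mutate calls with the same RegionTooBusyException until the next flush drains the memstore. The stock HBase client normally retries this exception with backoff on its own; a minimal sketch of what an explicit application-level retry around a single put could look like, assuming the exception reaches the caller (table, row, column and retry parameters are illustrative):

```java
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPut {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 0; attempt < 10; attempt++) {
                try {
                    table.put(put);
                    return;                           // write accepted
                } catch (RegionTooBusyException e) {  // region over its memstore limit
                    Thread.sleep(backoffMs);          // give the flush time to drain the memstore
                    backoffMs = Math.min(backoffMs * 2, 5_000);
                }
            }
            throw new IOException("region stayed too busy after 10 attempts");
        }
    }
}
```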
2024-11-20T11:20:56,906 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:56,906 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d., storeName=0470447603cf0ef7bd1ff47e79d9530d/C, priority=13, startTime=1732101656448; duration=0sec 2024-11-20T11:20:56,907 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:20:56,907 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0470447603cf0ef7bd1ff47e79d9530d:C 2024-11-20T11:20:56,969 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101716967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:56,970 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101716967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:56,970 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101716967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:56,970 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101716967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:56,970 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:56,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101716968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:57,050 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:20:57,054 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112018f49f83f4e34cbda1e388be1c922a02_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112018f49f83f4e34cbda1e388be1c922a02_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:57,055 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/e59b82d8d63e425f951d0c2b7c3c7d0b, store: [table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:57,055 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/e59b82d8d63e425f951d0c2b7c3c7d0b is 175, key is test_row_0/A:col10/1732101655992/Put/seqid=0 2024-11-20T11:20:57,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742204_1380 (size=39949) 2024-11-20T11:20:57,272 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:57,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101717271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:57,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:57,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101717271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:57,274 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:57,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101717271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:57,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:57,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101717273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:57,276 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:57,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101717273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:57,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T11:20:57,397 INFO [Thread-1395 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-11-20T11:20:57,399 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:20:57,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees 2024-11-20T11:20:57,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T11:20:57,400 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:20:57,401 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:20:57,401 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:20:57,461 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=278, memsize=24.6 K, 
hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/e59b82d8d63e425f951d0c2b7c3c7d0b 2024-11-20T11:20:57,468 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/166b0805914f40658a0452d45d0763d7 is 50, key is test_row_0/B:col10/1732101655992/Put/seqid=0 2024-11-20T11:20:57,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742205_1381 (size=12301) 2024-11-20T11:20:57,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T11:20:57,552 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:57,553 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T11:20:57,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:57,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:57,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:57,553 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:20:57,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:57,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:57,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T11:20:57,705 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:57,706 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T11:20:57,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:57,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:57,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:57,706 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:57,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:57,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:57,779 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:57,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101717775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:57,781 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:57,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101717777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:57,781 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:57,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101717777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:57,781 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:57,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101717779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:57,782 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:57,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101717779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:57,858 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:57,859 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T11:20:57,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:57,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:57,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:57,859 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:20:57,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:57,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:57,873 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/166b0805914f40658a0452d45d0763d7 2024-11-20T11:20:57,880 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/9d9dd8e9cc0d4f40a8fc061520a2c1a1 is 50, key is test_row_0/C:col10/1732101655992/Put/seqid=0 2024-11-20T11:20:57,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742206_1382 (size=12301) 2024-11-20T11:20:58,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T11:20:58,011 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:58,011 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T11:20:58,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:58,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:58,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:58,012 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:58,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:58,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:58,164 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:58,164 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T11:20:58,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:58,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:58,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:58,164 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:58,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:20:58,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:20:58,284 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/9d9dd8e9cc0d4f40a8fc061520a2c1a1 2024-11-20T11:20:58,289 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/e59b82d8d63e425f951d0c2b7c3c7d0b as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/e59b82d8d63e425f951d0c2b7c3c7d0b 2024-11-20T11:20:58,297 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/e59b82d8d63e425f951d0c2b7c3c7d0b, entries=200, sequenceid=278, filesize=39.0 K 2024-11-20T11:20:58,298 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/166b0805914f40658a0452d45d0763d7 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/166b0805914f40658a0452d45d0763d7 2024-11-20T11:20:58,301 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/166b0805914f40658a0452d45d0763d7, entries=150, sequenceid=278, filesize=12.0 K 2024-11-20T11:20:58,302 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/9d9dd8e9cc0d4f40a8fc061520a2c1a1 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/9d9dd8e9cc0d4f40a8fc061520a2c1a1 2024-11-20T11:20:58,305 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/9d9dd8e9cc0d4f40a8fc061520a2c1a1, entries=150, sequenceid=278, filesize=12.0 K 2024-11-20T11:20:58,306 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 0470447603cf0ef7bd1ff47e79d9530d in 1670ms, sequenceid=278, compaction requested=false 2024-11-20T11:20:58,306 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:58,316 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:58,317 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=123 2024-11-20T11:20:58,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:58,317 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2837): Flushing 0470447603cf0ef7bd1ff47e79d9530d 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T11:20:58,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=A 2024-11-20T11:20:58,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:58,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=B 2024-11-20T11:20:58,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:58,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=C 2024-11-20T11:20:58,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:20:58,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120888d89bab36647bd9e8d9567ebde8865_0470447603cf0ef7bd1ff47e79d9530d is 50, key is test_row_0/A:col10/1732101656658/Put/seqid=0 2024-11-20T11:20:58,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742207_1383 (size=12454) 2024-11-20T11:20:58,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T11:20:58,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:20:58,738 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120888d89bab36647bd9e8d9567ebde8865_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120888d89bab36647bd9e8d9567ebde8865_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:58,739 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/cc06f7f67ea34734af8788ce179d3c70, store: [table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:20:58,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/cc06f7f67ea34734af8788ce179d3c70 is 175, key is test_row_0/A:col10/1732101656658/Put/seqid=0 2024-11-20T11:20:58,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742208_1384 (size=31255) 2024-11-20T11:20:58,787 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. as already flushing 2024-11-20T11:20:58,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:20:58,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:58,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101718796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:58,802 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:58,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101718796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:58,802 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:58,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101718796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:58,808 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:58,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101718801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:58,808 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:58,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101718801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:58,906 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:58,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101718903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:58,906 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:58,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101718903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:58,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:58,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101718903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:58,911 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:58,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101718909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:58,911 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:58,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101718909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:59,110 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:59,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101719107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:59,110 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:59,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101719108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:59,115 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:59,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101719111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:59,115 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:59,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101719112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:59,115 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:59,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101719112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:59,145 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=301, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/cc06f7f67ea34734af8788ce179d3c70 2024-11-20T11:20:59,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/0d7ceba45e914503af67de7cfa7dfa06 is 50, key is test_row_0/B:col10/1732101656658/Put/seqid=0 2024-11-20T11:20:59,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742209_1385 (size=12301) 2024-11-20T11:20:59,415 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:59,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101719411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:59,415 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:59,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101719412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:59,420 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:59,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101719416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:59,420 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:59,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101719416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:59,421 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:59,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101719418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:59,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T11:20:59,558 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/0d7ceba45e914503af67de7cfa7dfa06 2024-11-20T11:20:59,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/deac7cf7a6e44da1b96458fc19e32093 is 50, key is test_row_0/C:col10/1732101656658/Put/seqid=0 2024-11-20T11:20:59,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742210_1386 (size=12301) 2024-11-20T11:20:59,926 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:59,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50054 deadline: 1732101719919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:59,926 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:59,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50078 deadline: 1732101719919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:59,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:59,927 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:59,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50064 deadline: 1732101719922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:59,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50100 deadline: 1732101719921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:59,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:20:59,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50040 deadline: 1732101719925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:20:59,973 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/deac7cf7a6e44da1b96458fc19e32093 2024-11-20T11:20:59,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/cc06f7f67ea34734af8788ce179d3c70 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/cc06f7f67ea34734af8788ce179d3c70 2024-11-20T11:20:59,982 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/cc06f7f67ea34734af8788ce179d3c70, entries=150, sequenceid=301, filesize=30.5 K 2024-11-20T11:20:59,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/0d7ceba45e914503af67de7cfa7dfa06 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/0d7ceba45e914503af67de7cfa7dfa06 2024-11-20T11:20:59,987 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/0d7ceba45e914503af67de7cfa7dfa06, entries=150, sequenceid=301, filesize=12.0 K 2024-11-20T11:20:59,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/deac7cf7a6e44da1b96458fc19e32093 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/deac7cf7a6e44da1b96458fc19e32093 2024-11-20T11:20:59,991 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/deac7cf7a6e44da1b96458fc19e32093, entries=150, sequenceid=301, filesize=12.0 K 2024-11-20T11:20:59,992 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 0470447603cf0ef7bd1ff47e79d9530d in 1674ms, sequenceid=301, compaction requested=true 2024-11-20T11:20:59,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:20:59,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:20:59,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-11-20T11:20:59,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=123 2024-11-20T11:20:59,994 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-11-20T11:20:59,994 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5920 sec 2024-11-20T11:20:59,995 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees in 2.5950 sec 2024-11-20T11:21:00,752 DEBUG [Thread-1402 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x798e7fd4 to 127.0.0.1:62733 2024-11-20T11:21:00,752 DEBUG [Thread-1400 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5e3203d9 to 127.0.0.1:62733 2024-11-20T11:21:00,752 DEBUG [Thread-1402 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:21:00,752 DEBUG [Thread-1400 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:21:00,753 DEBUG [Thread-1398 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x27539bdc to 127.0.0.1:62733 2024-11-20T11:21:00,753 DEBUG [Thread-1398 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:21:00,755 DEBUG [Thread-1404 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7284f16d to 127.0.0.1:62733 2024-11-20T11:21:00,755 DEBUG [Thread-1404 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:21:00,757 DEBUG [Thread-1396 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0801ba40 to 
127.0.0.1:62733 2024-11-20T11:21:00,757 DEBUG [Thread-1396 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:21:00,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:21:00,930 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0470447603cf0ef7bd1ff47e79d9530d 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T11:21:00,930 DEBUG [Thread-1385 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x51f7d511 to 127.0.0.1:62733 2024-11-20T11:21:00,930 DEBUG [Thread-1387 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1dc42ea6 to 127.0.0.1:62733 2024-11-20T11:21:00,930 DEBUG [Thread-1385 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:21:00,930 DEBUG [Thread-1387 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:21:00,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=A 2024-11-20T11:21:00,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:00,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=B 2024-11-20T11:21:00,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:00,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=C 2024-11-20T11:21:00,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:00,933 DEBUG [Thread-1393 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x31aea41b to 127.0.0.1:62733 2024-11-20T11:21:00,933 DEBUG [Thread-1393 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:21:00,935 DEBUG [Thread-1391 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6cd96549 to 127.0.0.1:62733 2024-11-20T11:21:00,935 DEBUG [Thread-1391 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:21:00,937 DEBUG [Thread-1389 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x117e86d9 to 127.0.0.1:62733 2024-11-20T11:21:00,937 DEBUG [Thread-1389 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:21:00,937 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205c4b3088d2bc49e39ea52398048ad8d6_0470447603cf0ef7bd1ff47e79d9530d is 50, key is test_row_0/A:col10/1732101660929/Put/seqid=0 2024-11-20T11:21:00,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742211_1387 (size=12454) 2024-11-20T11:21:01,341 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:01,345 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205c4b3088d2bc49e39ea52398048ad8d6_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205c4b3088d2bc49e39ea52398048ad8d6_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:21:01,346 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/af81e752e52541e1ad9b2f54536a7250, store: [table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:21:01,346 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/af81e752e52541e1ad9b2f54536a7250 is 175, key is test_row_0/A:col10/1732101660929/Put/seqid=0 2024-11-20T11:21:01,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742212_1388 (size=31255) 2024-11-20T11:21:01,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T11:21:01,505 INFO [Thread-1395 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-11-20T11:21:01,505 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-11-20T11:21:01,505 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 52
2024-11-20T11:21:01,505 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 51
2024-11-20T11:21:01,505 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 46
2024-11-20T11:21:01,505 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 50
2024-11-20T11:21:01,505 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 51
2024-11-20T11:21:01,505 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-20T11:21:01,505 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-20T11:21:01,505 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2986
2024-11-20T11:21:01,505 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8956 rows
2024-11-20T11:21:01,505 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2988
2024-11-20T11:21:01,505 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8964 rows
2024-11-20T11:21:01,505 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3006
2024-11-20T11:21:01,505 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9018 rows
2024-11-20T11:21:01,505 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2992
2024-11-20T11:21:01,505 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8976 rows
2024-11-20T11:21:01,505 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3004
2024-11-20T11:21:01,505 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9012 rows
2024-11-20T11:21:01,505 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-20T11:21:01,505 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5fe71801 to 127.0.0.1:62733
2024-11-20T11:21:01,505 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T11:21:01,508 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-20T11:21:01,509 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-11-20T11:21:01,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-11-20T11:21:01,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124
2024-11-20T11:21:01,513 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732101661512"}]},"ts":"1732101661512"}
2024-11-20T11:21:01,514 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-11-20T11:21:01,516 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-11-20T11:21:01,516 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-11-20T11:21:01,517 INFO [PEWorker-5 {}]
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0470447603cf0ef7bd1ff47e79d9530d, UNASSIGN}] 2024-11-20T11:21:01,517 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0470447603cf0ef7bd1ff47e79d9530d, UNASSIGN 2024-11-20T11:21:01,518 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=0470447603cf0ef7bd1ff47e79d9530d, regionState=CLOSING, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:01,519 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T11:21:01,519 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; CloseRegionProcedure 0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666}] 2024-11-20T11:21:01,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-20T11:21:01,670 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:01,670 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(124): Close 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:21:01,670 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T11:21:01,670 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1681): Closing 0470447603cf0ef7bd1ff47e79d9530d, disabling compactions & flushes 2024-11-20T11:21:01,670 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
2024-11-20T11:21:01,750 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=317, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/af81e752e52541e1ad9b2f54536a7250 2024-11-20T11:21:01,757 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/375f4d2dfce94200b6f016656b27089e is 50, key is test_row_0/B:col10/1732101660929/Put/seqid=0 2024-11-20T11:21:01,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742213_1389 (size=12301) 2024-11-20T11:21:01,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-20T11:21:02,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-20T11:21:02,161 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/375f4d2dfce94200b6f016656b27089e 2024-11-20T11:21:02,166 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/89e6fd9cb2d44c28b7d6ba2963c36873 is 50, key is test_row_0/C:col10/1732101660929/Put/seqid=0 2024-11-20T11:21:02,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742214_1390 (size=12301) 2024-11-20T11:21:02,570 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/89e6fd9cb2d44c28b7d6ba2963c36873 2024-11-20T11:21:02,574 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/af81e752e52541e1ad9b2f54536a7250 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/af81e752e52541e1ad9b2f54536a7250 2024-11-20T11:21:02,576 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/af81e752e52541e1ad9b2f54536a7250, entries=150, sequenceid=317, filesize=30.5 K 2024-11-20T11:21:02,577 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/375f4d2dfce94200b6f016656b27089e as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/375f4d2dfce94200b6f016656b27089e 2024-11-20T11:21:02,579 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/375f4d2dfce94200b6f016656b27089e, entries=150, sequenceid=317, filesize=12.0 K 2024-11-20T11:21:02,580 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/89e6fd9cb2d44c28b7d6ba2963c36873 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/89e6fd9cb2d44c28b7d6ba2963c36873 2024-11-20T11:21:02,583 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/89e6fd9cb2d44c28b7d6ba2963c36873, entries=150, sequenceid=317, filesize=12.0 K 2024-11-20T11:21:02,583 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=20.13 KB/20610 for 0470447603cf0ef7bd1ff47e79d9530d in 1653ms, sequenceid=317, compaction requested=true 2024-11-20T11:21:02,583 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:21:02,583 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:21:02,583 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:21:02,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0470447603cf0ef7bd1ff47e79d9530d:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:21:02,584 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. after waiting 0 ms 2024-11-20T11:21:02,584 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:21:02,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:02,584 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 
because compaction request was cancelled 2024-11-20T11:21:02,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0470447603cf0ef7bd1ff47e79d9530d:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:21:02,584 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0470447603cf0ef7bd1ff47e79d9530d:A 2024-11-20T11:21:02,584 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. because compaction request was cancelled 2024-11-20T11:21:02,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:02,584 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(2837): Flushing 0470447603cf0ef7bd1ff47e79d9530d 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-20T11:21:02,584 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0470447603cf0ef7bd1ff47e79d9530d:B 2024-11-20T11:21:02,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0470447603cf0ef7bd1ff47e79d9530d:C, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:21:02,584 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. because compaction request was cancelled 2024-11-20T11:21:02,584 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0470447603cf0ef7bd1ff47e79d9530d:C 2024-11-20T11:21:02,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:02,584 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=A 2024-11-20T11:21:02,584 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:02,584 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=B 2024-11-20T11:21:02,584 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:02,584 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0470447603cf0ef7bd1ff47e79d9530d, store=C 2024-11-20T11:21:02,584 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:02,588 DEBUG 
[RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a0eb94571c3249209faae8118aaa3f81_0470447603cf0ef7bd1ff47e79d9530d is 50, key is test_row_1/A:col10/1732101660936/Put/seqid=0 2024-11-20T11:21:02,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742215_1391 (size=9914) 2024-11-20T11:21:02,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-20T11:21:02,992 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:02,995 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a0eb94571c3249209faae8118aaa3f81_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a0eb94571c3249209faae8118aaa3f81_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:21:02,995 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/6086e62488684fec967b673667a6d308, store: [table=TestAcidGuarantees family=A region=0470447603cf0ef7bd1ff47e79d9530d] 2024-11-20T11:21:02,996 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/6086e62488684fec967b673667a6d308 is 175, key is test_row_1/A:col10/1732101660936/Put/seqid=0 2024-11-20T11:21:02,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742216_1392 (size=22561) 2024-11-20T11:21:03,400 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=323, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/6086e62488684fec967b673667a6d308 2024-11-20T11:21:03,406 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/5d9000b4406e4e818774fd58657bfc81 is 50, key is 
test_row_1/B:col10/1732101660936/Put/seqid=0 2024-11-20T11:21:03,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742217_1393 (size=9857) 2024-11-20T11:21:03,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-20T11:21:03,810 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=323 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/5d9000b4406e4e818774fd58657bfc81 2024-11-20T11:21:03,815 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/524c487e26954e2bb17640f56e981226 is 50, key is test_row_1/C:col10/1732101660936/Put/seqid=0 2024-11-20T11:21:03,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742218_1394 (size=9857) 2024-11-20T11:21:04,219 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=323 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/524c487e26954e2bb17640f56e981226 2024-11-20T11:21:04,223 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/A/6086e62488684fec967b673667a6d308 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/6086e62488684fec967b673667a6d308 2024-11-20T11:21:04,226 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/6086e62488684fec967b673667a6d308, entries=100, sequenceid=323, filesize=22.0 K 2024-11-20T11:21:04,227 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/B/5d9000b4406e4e818774fd58657bfc81 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/5d9000b4406e4e818774fd58657bfc81 2024-11-20T11:21:04,230 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/5d9000b4406e4e818774fd58657bfc81, entries=100, sequenceid=323, filesize=9.6 K 2024-11-20T11:21:04,231 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/.tmp/C/524c487e26954e2bb17640f56e981226 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/524c487e26954e2bb17640f56e981226 2024-11-20T11:21:04,234 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/524c487e26954e2bb17640f56e981226, entries=100, sequenceid=323, filesize=9.6 K 2024-11-20T11:21:04,235 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 0470447603cf0ef7bd1ff47e79d9530d in 1651ms, sequenceid=323, compaction requested=true 2024-11-20T11:21:04,235 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/99bffaacdffa466caca9f8d4ed5db518, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/4530add9b18a4195a428fe19314b1a0f, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/5854cf6583564cb3aa47df6ea52f7448, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/7e7e503272e746fea4f7f5c6d6ca48c1, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/fafeeefbc0644efb92fc2a2059762c58, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/8028b984dec7451a9ec0471bdf8894aa, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/ba2fb839fdf348b5a1a71eff143d94ff, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/28b89fab405f4c62ad840197336c0389, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/62395fb3697e4ad98eaff04175b117a5, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/102fc35f6feb438ba0a30f680c01b2d9, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/eb4184a235694b529832b22988535ec7, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/99bbe95482584fb8a840ce258f64e176, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/26461cf64a9f44ba856eea6d557252f7, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/874307cb626d493ba71f26cbbf3f31a9, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/1a936cce6de341138f77c9b9c2fcf328, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/128b18045ccc4b83bb8a680d7d94156b, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/0c9d8bec0dae452da90637ed3930f983, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/9162eb0652a34a18b7fa3b4d47192ecc] to archive 2024-11-20T11:21:04,236 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T11:21:04,237 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/99bffaacdffa466caca9f8d4ed5db518 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/99bffaacdffa466caca9f8d4ed5db518 2024-11-20T11:21:04,238 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/4530add9b18a4195a428fe19314b1a0f to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/4530add9b18a4195a428fe19314b1a0f 2024-11-20T11:21:04,239 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/5854cf6583564cb3aa47df6ea52f7448 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/5854cf6583564cb3aa47df6ea52f7448 2024-11-20T11:21:04,240 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/7e7e503272e746fea4f7f5c6d6ca48c1 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/7e7e503272e746fea4f7f5c6d6ca48c1 2024-11-20T11:21:04,241 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/fafeeefbc0644efb92fc2a2059762c58 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/fafeeefbc0644efb92fc2a2059762c58 2024-11-20T11:21:04,242 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/8028b984dec7451a9ec0471bdf8894aa to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/8028b984dec7451a9ec0471bdf8894aa 2024-11-20T11:21:04,243 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/ba2fb839fdf348b5a1a71eff143d94ff to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/ba2fb839fdf348b5a1a71eff143d94ff 2024-11-20T11:21:04,243 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/28b89fab405f4c62ad840197336c0389 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/28b89fab405f4c62ad840197336c0389 2024-11-20T11:21:04,244 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/62395fb3697e4ad98eaff04175b117a5 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/62395fb3697e4ad98eaff04175b117a5 2024-11-20T11:21:04,245 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/102fc35f6feb438ba0a30f680c01b2d9 to 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/102fc35f6feb438ba0a30f680c01b2d9 2024-11-20T11:21:04,246 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/eb4184a235694b529832b22988535ec7 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/eb4184a235694b529832b22988535ec7 2024-11-20T11:21:04,247 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/99bbe95482584fb8a840ce258f64e176 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/99bbe95482584fb8a840ce258f64e176 2024-11-20T11:21:04,248 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/26461cf64a9f44ba856eea6d557252f7 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/26461cf64a9f44ba856eea6d557252f7 2024-11-20T11:21:04,248 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/874307cb626d493ba71f26cbbf3f31a9 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/874307cb626d493ba71f26cbbf3f31a9 2024-11-20T11:21:04,249 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/1a936cce6de341138f77c9b9c2fcf328 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/1a936cce6de341138f77c9b9c2fcf328 2024-11-20T11:21:04,250 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/128b18045ccc4b83bb8a680d7d94156b to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/128b18045ccc4b83bb8a680d7d94156b 2024-11-20T11:21:04,251 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/0c9d8bec0dae452da90637ed3930f983 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/0c9d8bec0dae452da90637ed3930f983 2024-11-20T11:21:04,252 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/9162eb0652a34a18b7fa3b4d47192ecc to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/9162eb0652a34a18b7fa3b4d47192ecc 2024-11-20T11:21:04,253 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/b0ee536bddca47bf8bd5526bdde99c32, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/ce57e938a10240ee8e5d34d94b2b4eb2, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/7c37fc5ad50a441b944b0c163c75744f, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/adec9dfab7a6492daf76ce072d8eca13, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/df9adf14c91941b0b8dfaf75e9aab155, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/e3351963cf564d3e8fb33f2a83e6c13a, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/6c7b380115194f4396b89f539ca7cba2, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/008ceabe7df54ff1b038d439f2e79e7e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/8d72375dba2844ea891e6bdcf63d28b4, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/471238cdb572431080c9112c04d7d62a, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/b45bdea323b2478a9bc584fcd0f4ab81, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/2cea8c8160e24089ac2f6969e7f1ee19, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/f4f6319acb584a74b9b721384ac16f8a, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/75e94e8862754d0bbdef8d339d535622, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/5a34e3705e5b4eb68d814c8927da1bba, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/95c98c4449d648f5bed4703e39a827b2, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/0f1cc89c879a4a18bc82498260dd6dc6, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/6b0a57e068164ec6919e37d3aaa235f8] to archive 2024-11-20T11:21:04,253 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T11:21:04,255 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/b0ee536bddca47bf8bd5526bdde99c32 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/b0ee536bddca47bf8bd5526bdde99c32 2024-11-20T11:21:04,255 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/ce57e938a10240ee8e5d34d94b2b4eb2 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/ce57e938a10240ee8e5d34d94b2b4eb2 2024-11-20T11:21:04,256 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/7c37fc5ad50a441b944b0c163c75744f to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/7c37fc5ad50a441b944b0c163c75744f 2024-11-20T11:21:04,257 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/adec9dfab7a6492daf76ce072d8eca13 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/adec9dfab7a6492daf76ce072d8eca13 2024-11-20T11:21:04,258 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/df9adf14c91941b0b8dfaf75e9aab155 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/df9adf14c91941b0b8dfaf75e9aab155 2024-11-20T11:21:04,259 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/e3351963cf564d3e8fb33f2a83e6c13a to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/e3351963cf564d3e8fb33f2a83e6c13a 2024-11-20T11:21:04,260 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/6c7b380115194f4396b89f539ca7cba2 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/6c7b380115194f4396b89f539ca7cba2 2024-11-20T11:21:04,261 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/008ceabe7df54ff1b038d439f2e79e7e to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/008ceabe7df54ff1b038d439f2e79e7e 2024-11-20T11:21:04,261 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/8d72375dba2844ea891e6bdcf63d28b4 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/8d72375dba2844ea891e6bdcf63d28b4 2024-11-20T11:21:04,262 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/471238cdb572431080c9112c04d7d62a to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/471238cdb572431080c9112c04d7d62a 2024-11-20T11:21:04,263 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/b45bdea323b2478a9bc584fcd0f4ab81 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/b45bdea323b2478a9bc584fcd0f4ab81 2024-11-20T11:21:04,264 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/2cea8c8160e24089ac2f6969e7f1ee19 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/2cea8c8160e24089ac2f6969e7f1ee19 2024-11-20T11:21:04,265 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/f4f6319acb584a74b9b721384ac16f8a to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/f4f6319acb584a74b9b721384ac16f8a 2024-11-20T11:21:04,265 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/75e94e8862754d0bbdef8d339d535622 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/75e94e8862754d0bbdef8d339d535622 2024-11-20T11:21:04,266 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/5a34e3705e5b4eb68d814c8927da1bba to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/5a34e3705e5b4eb68d814c8927da1bba 2024-11-20T11:21:04,267 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/95c98c4449d648f5bed4703e39a827b2 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/95c98c4449d648f5bed4703e39a827b2 2024-11-20T11:21:04,268 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/0f1cc89c879a4a18bc82498260dd6dc6 to 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/0f1cc89c879a4a18bc82498260dd6dc6 2024-11-20T11:21:04,269 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/6b0a57e068164ec6919e37d3aaa235f8 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/6b0a57e068164ec6919e37d3aaa235f8 2024-11-20T11:21:04,270 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/6e0289b797034fec89a71d41eb85f0d8, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/66546e2dede14596aafffde066642a7c, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/512f1e951ff8489a84af94a05b6af1f4, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/16e3a910bccb4a3e87dfb8ef8642cdf1, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/b13ea554e6034b2db220d9388f91260e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/ec79aa93c7ab471fba8ddf8aa9b7f044, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/51ebd98610f7482a8d7e051533a35a19, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/35b86407f4bb46b883ba1bfa47242ffb, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/c5148b9e3adf4671a23dff7c0f5c4022, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/362eea827395459ab211d94a67a52452, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/c629a2a5602a4a8f995f5eff749c46ae, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/abaf854956ff410387f756f0017caf5f, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/3d110159a0be4099afe00d0fbfd3b64a, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/c7c202ac432a4ebcab07e37eafa8b809, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/1f9d9aaffd324b739cff327bb8e47026, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/6b58a701886c4a3d81cf0a996590bcc2, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/a59eb25824364b338876e086efa80397, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/618b40850f154e01993c6340a7dc6b05] to archive 2024-11-20T11:21:04,270 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T11:21:04,272 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/6e0289b797034fec89a71d41eb85f0d8 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/6e0289b797034fec89a71d41eb85f0d8 2024-11-20T11:21:04,273 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/66546e2dede14596aafffde066642a7c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/66546e2dede14596aafffde066642a7c 2024-11-20T11:21:04,274 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/512f1e951ff8489a84af94a05b6af1f4 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/512f1e951ff8489a84af94a05b6af1f4 2024-11-20T11:21:04,275 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/16e3a910bccb4a3e87dfb8ef8642cdf1 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/16e3a910bccb4a3e87dfb8ef8642cdf1 2024-11-20T11:21:04,275 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/b13ea554e6034b2db220d9388f91260e to 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/b13ea554e6034b2db220d9388f91260e 2024-11-20T11:21:04,276 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/ec79aa93c7ab471fba8ddf8aa9b7f044 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/ec79aa93c7ab471fba8ddf8aa9b7f044 2024-11-20T11:21:04,277 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/51ebd98610f7482a8d7e051533a35a19 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/51ebd98610f7482a8d7e051533a35a19 2024-11-20T11:21:04,278 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/35b86407f4bb46b883ba1bfa47242ffb to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/35b86407f4bb46b883ba1bfa47242ffb 2024-11-20T11:21:04,279 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/c5148b9e3adf4671a23dff7c0f5c4022 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/c5148b9e3adf4671a23dff7c0f5c4022 2024-11-20T11:21:04,280 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/362eea827395459ab211d94a67a52452 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/362eea827395459ab211d94a67a52452 2024-11-20T11:21:04,281 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/c629a2a5602a4a8f995f5eff749c46ae to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/c629a2a5602a4a8f995f5eff749c46ae 2024-11-20T11:21:04,282 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/abaf854956ff410387f756f0017caf5f to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/abaf854956ff410387f756f0017caf5f 2024-11-20T11:21:04,282 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/3d110159a0be4099afe00d0fbfd3b64a to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/3d110159a0be4099afe00d0fbfd3b64a 2024-11-20T11:21:04,283 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/c7c202ac432a4ebcab07e37eafa8b809 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/c7c202ac432a4ebcab07e37eafa8b809 2024-11-20T11:21:04,284 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/1f9d9aaffd324b739cff327bb8e47026 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/1f9d9aaffd324b739cff327bb8e47026 2024-11-20T11:21:04,285 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/6b58a701886c4a3d81cf0a996590bcc2 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/6b58a701886c4a3d81cf0a996590bcc2 2024-11-20T11:21:04,286 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/a59eb25824364b338876e086efa80397 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/a59eb25824364b338876e086efa80397 2024-11-20T11:21:04,287 DEBUG [StoreCloser-TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/618b40850f154e01993c6340a7dc6b05 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/618b40850f154e01993c6340a7dc6b05 2024-11-20T11:21:04,290 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/recovered.edits/326.seqid, newMaxSeqId=326, maxSeqId=4 2024-11-20T11:21:04,291 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d. 2024-11-20T11:21:04,291 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1635): Region close journal for 0470447603cf0ef7bd1ff47e79d9530d: 2024-11-20T11:21:04,292 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(170): Closed 0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:21:04,292 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=0470447603cf0ef7bd1ff47e79d9530d, regionState=CLOSED 2024-11-20T11:21:04,294 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-11-20T11:21:04,294 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; CloseRegionProcedure 0470447603cf0ef7bd1ff47e79d9530d, server=ee8338ed7cc0,35185,1732101546666 in 2.7740 sec 2024-11-20T11:21:04,295 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=126, resume processing ppid=125 2024-11-20T11:21:04,295 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, ppid=125, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=0470447603cf0ef7bd1ff47e79d9530d, UNASSIGN in 2.7770 sec 2024-11-20T11:21:04,296 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-11-20T11:21:04,297 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.7800 sec 2024-11-20T11:21:04,297 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732101664297"}]},"ts":"1732101664297"} 2024-11-20T11:21:04,298 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T11:21:04,300 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T11:21:04,301 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.7910 sec 2024-11-20T11:21:05,056 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-20T11:21:05,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-20T11:21:05,616 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-11-20T11:21:05,617 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T11:21:05,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:21:05,618 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=128, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:21:05,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T11:21:05,618 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=128, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:21:05,620 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:21:05,621 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A, FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B, FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C, FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/recovered.edits] 2024-11-20T11:21:05,623 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/29953fd1948b4f9fb990e3ae5cb45655 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/29953fd1948b4f9fb990e3ae5cb45655 2024-11-20T11:21:05,624 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/6086e62488684fec967b673667a6d308 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/6086e62488684fec967b673667a6d308 2024-11-20T11:21:05,625 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/af81e752e52541e1ad9b2f54536a7250 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/af81e752e52541e1ad9b2f54536a7250 2024-11-20T11:21:05,626 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/cc06f7f67ea34734af8788ce179d3c70 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/cc06f7f67ea34734af8788ce179d3c70 2024-11-20T11:21:05,627 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/e59b82d8d63e425f951d0c2b7c3c7d0b to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/A/e59b82d8d63e425f951d0c2b7c3c7d0b 2024-11-20T11:21:05,628 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/0d7ceba45e914503af67de7cfa7dfa06 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/0d7ceba45e914503af67de7cfa7dfa06 2024-11-20T11:21:05,629 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/166b0805914f40658a0452d45d0763d7 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/166b0805914f40658a0452d45d0763d7 2024-11-20T11:21:05,630 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/375f4d2dfce94200b6f016656b27089e to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/375f4d2dfce94200b6f016656b27089e 2024-11-20T11:21:05,631 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/5d9000b4406e4e818774fd58657bfc81 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/5d9000b4406e4e818774fd58657bfc81 2024-11-20T11:21:05,632 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/7e003fbce7c3410aad499a81f2c6efd5 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/B/7e003fbce7c3410aad499a81f2c6efd5 2024-11-20T11:21:05,633 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/524c487e26954e2bb17640f56e981226 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/524c487e26954e2bb17640f56e981226 2024-11-20T11:21:05,634 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/89e6fd9cb2d44c28b7d6ba2963c36873 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/89e6fd9cb2d44c28b7d6ba2963c36873 2024-11-20T11:21:05,635 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/9d9dd8e9cc0d4f40a8fc061520a2c1a1 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/9d9dd8e9cc0d4f40a8fc061520a2c1a1 2024-11-20T11:21:05,636 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/a9dcbea694b94954b740f4e400e5cb45 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/a9dcbea694b94954b740f4e400e5cb45 2024-11-20T11:21:05,636 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/deac7cf7a6e44da1b96458fc19e32093 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/C/deac7cf7a6e44da1b96458fc19e32093 2024-11-20T11:21:05,639 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/recovered.edits/326.seqid to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d/recovered.edits/326.seqid 2024-11-20T11:21:05,639 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:21:05,639 DEBUG [PEWorker-2 {}] 
procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T11:21:05,639 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T11:21:05,640 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-20T11:21:05,642 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120049f99f687464b71b4342dee88495feb_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120049f99f687464b71b4342dee88495feb_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:21:05,643 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120126ff3bb9ce341d7821b57cd8506477c_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120126ff3bb9ce341d7821b57cd8506477c_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:21:05,644 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112014d0ec403eef42c5a3ede8439469ed8d_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112014d0ec403eef42c5a3ede8439469ed8d_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:21:05,645 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112018f49f83f4e34cbda1e388be1c922a02_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112018f49f83f4e34cbda1e388be1c922a02_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:21:05,646 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112030e65e6d4bb9491b959859cbf9b98c07_0470447603cf0ef7bd1ff47e79d9530d to 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112030e65e6d4bb9491b959859cbf9b98c07_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:21:05,646 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205c4b3088d2bc49e39ea52398048ad8d6_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205c4b3088d2bc49e39ea52398048ad8d6_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:21:05,647 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206df914d0e52446188ac63d6e39acf194_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206df914d0e52446188ac63d6e39acf194_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:21:05,648 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120739e05f23f424a84a9aecad87e0f2092_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120739e05f23f424a84a9aecad87e0f2092_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:21:05,649 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207c8063324dd64028aeef9ef3bad4c2ab_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207c8063324dd64028aeef9ef3bad4c2ab_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:21:05,650 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120888d89bab36647bd9e8d9567ebde8865_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120888d89bab36647bd9e8d9567ebde8865_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:21:05,651 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a0eb94571c3249209faae8118aaa3f81_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a0eb94571c3249209faae8118aaa3f81_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:21:05,652 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a2a67e39e2214fb5b6f6a016ce994ec4_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a2a67e39e2214fb5b6f6a016ce994ec4_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:21:05,653 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a5c6b8e879a04eeeafde52287b9cbb00_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a5c6b8e879a04eeeafde52287b9cbb00_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:21:05,654 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a8f389f7b8e74853bec349bc73a680f5_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a8f389f7b8e74853bec349bc73a680f5_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:21:05,655 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bd389848acf54a8f894785b2d6d28aab_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bd389848acf54a8f894785b2d6d28aab_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:21:05,656 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cadccb154d274125bf482d880e821eaf_0470447603cf0ef7bd1ff47e79d9530d to 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cadccb154d274125bf482d880e821eaf_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:21:05,657 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cd139b4a459746218f1f9972b64dd59e_0470447603cf0ef7bd1ff47e79d9530d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cd139b4a459746218f1f9972b64dd59e_0470447603cf0ef7bd1ff47e79d9530d 2024-11-20T11:21:05,657 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T11:21:05,659 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=128, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:21:05,660 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T11:21:05,662 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T11:21:05,663 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=128, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:21:05,663 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T11:21:05,663 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732101665663"}]},"ts":"9223372036854775807"} 2024-11-20T11:21:05,665 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T11:21:05,665 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 0470447603cf0ef7bd1ff47e79d9530d, NAME => 'TestAcidGuarantees,,1732101637693.0470447603cf0ef7bd1ff47e79d9530d.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T11:21:05,665 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-20T11:21:05,665 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732101665665"}]},"ts":"9223372036854775807"} 2024-11-20T11:21:05,667 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T11:21:05,669 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=128, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:21:05,670 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 52 msec 2024-11-20T11:21:05,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T11:21:05,719 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-11-20T11:21:05,728 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=239 (was 238) - Thread LEAK? -, OpenFileDescriptor=451 (was 451), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=284 (was 320), ProcessCount=11 (was 11), AvailableMemoryMB=5797 (was 5845) 2024-11-20T11:21:05,737 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=239, OpenFileDescriptor=451, MaxFileDescriptor=1048576, SystemLoadAverage=284, ProcessCount=11, AvailableMemoryMB=5796 2024-11-20T11:21:05,738 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-20T11:21:05,738 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T11:21:05,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=129, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T11:21:05,740 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=129, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T11:21:05,740 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:05,740 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 129 2024-11-20T11:21:05,741 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=129, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T11:21:05,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-11-20T11:21:05,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742219_1395 (size=960) 2024-11-20T11:21:05,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-11-20T11:21:06,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-11-20T11:21:06,147 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830 2024-11-20T11:21:06,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742220_1396 (size=53) 2024-11-20T11:21:06,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-11-20T11:21:06,416 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T11:21:06,553 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T11:21:06,553 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 2bb1124baa057df845a5f13f3b500be1, disabling compactions & flushes 2024-11-20T11:21:06,553 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:06,553 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:06,553 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. after waiting 0 ms 2024-11-20T11:21:06,553 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:06,553 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
2024-11-20T11:21:06,553 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:06,554 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=129, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T11:21:06,554 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732101666554"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732101666554"}]},"ts":"1732101666554"} 2024-11-20T11:21:06,555 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T11:21:06,556 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=129, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T11:21:06,556 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732101666556"}]},"ts":"1732101666556"} 2024-11-20T11:21:06,557 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T11:21:06,560 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=130, ppid=129, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2bb1124baa057df845a5f13f3b500be1, ASSIGN}] 2024-11-20T11:21:06,561 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=130, ppid=129, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2bb1124baa057df845a5f13f3b500be1, ASSIGN 2024-11-20T11:21:06,561 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=130, ppid=129, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=2bb1124baa057df845a5f13f3b500be1, ASSIGN; state=OFFLINE, location=ee8338ed7cc0,35185,1732101546666; forceNewPlan=false, retain=false 2024-11-20T11:21:06,712 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=130 updating hbase:meta row=2bb1124baa057df845a5f13f3b500be1, regionState=OPENING, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:06,713 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; OpenRegionProcedure 2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666}] 2024-11-20T11:21:06,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-11-20T11:21:06,864 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:06,866 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
2024-11-20T11:21:06,866 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] regionserver.HRegion(7285): Opening region: {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} 2024-11-20T11:21:06,867 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:06,867 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T11:21:06,867 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] regionserver.HRegion(7327): checking encryption for 2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:06,867 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] regionserver.HRegion(7330): checking classloading for 2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:06,868 INFO [StoreOpener-2bb1124baa057df845a5f13f3b500be1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:06,869 INFO [StoreOpener-2bb1124baa057df845a5f13f3b500be1-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T11:21:06,869 INFO [StoreOpener-2bb1124baa057df845a5f13f3b500be1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2bb1124baa057df845a5f13f3b500be1 columnFamilyName A 2024-11-20T11:21:06,869 DEBUG [StoreOpener-2bb1124baa057df845a5f13f3b500be1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:06,870 INFO [StoreOpener-2bb1124baa057df845a5f13f3b500be1-1 {}] regionserver.HStore(327): Store=2bb1124baa057df845a5f13f3b500be1/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:21:06,870 INFO [StoreOpener-2bb1124baa057df845a5f13f3b500be1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:06,871 INFO [StoreOpener-2bb1124baa057df845a5f13f3b500be1-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T11:21:06,871 INFO [StoreOpener-2bb1124baa057df845a5f13f3b500be1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2bb1124baa057df845a5f13f3b500be1 columnFamilyName B 2024-11-20T11:21:06,871 DEBUG [StoreOpener-2bb1124baa057df845a5f13f3b500be1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:06,871 INFO [StoreOpener-2bb1124baa057df845a5f13f3b500be1-1 {}] regionserver.HStore(327): Store=2bb1124baa057df845a5f13f3b500be1/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:21:06,871 INFO [StoreOpener-2bb1124baa057df845a5f13f3b500be1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:06,872 INFO [StoreOpener-2bb1124baa057df845a5f13f3b500be1-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T11:21:06,872 INFO [StoreOpener-2bb1124baa057df845a5f13f3b500be1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2bb1124baa057df845a5f13f3b500be1 columnFamilyName C 2024-11-20T11:21:06,872 DEBUG [StoreOpener-2bb1124baa057df845a5f13f3b500be1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:06,873 INFO [StoreOpener-2bb1124baa057df845a5f13f3b500be1-1 {}] regionserver.HStore(327): Store=2bb1124baa057df845a5f13f3b500be1/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:21:06,873 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:06,873 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:06,874 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:06,875 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T11:21:06,875 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] regionserver.HRegion(1085): writing seq id for 2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:06,877 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T11:21:06,877 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] regionserver.HRegion(1102): Opened 2bb1124baa057df845a5f13f3b500be1; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73319583, jitterRate=0.09254692494869232}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T11:21:06,878 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] regionserver.HRegion(1001): Region open journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:06,878 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1., pid=131, masterSystemTime=1732101666864 2024-11-20T11:21:06,879 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:06,879 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
2024-11-20T11:21:06,880 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=130 updating hbase:meta row=2bb1124baa057df845a5f13f3b500be1, regionState=OPEN, openSeqNum=2, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:06,881 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-11-20T11:21:06,881 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; OpenRegionProcedure 2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 in 167 msec 2024-11-20T11:21:06,882 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=130, resume processing ppid=129 2024-11-20T11:21:06,882 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, ppid=129, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=2bb1124baa057df845a5f13f3b500be1, ASSIGN in 321 msec 2024-11-20T11:21:06,883 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=129, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T11:21:06,883 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732101666883"}]},"ts":"1732101666883"} 2024-11-20T11:21:06,883 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T11:21:06,886 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=129, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T11:21:06,886 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1480 sec 2024-11-20T11:21:07,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-11-20T11:21:07,845 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 129 completed 2024-11-20T11:21:07,846 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x022a6e9f to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4c60eb7d 2024-11-20T11:21:07,849 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@695c2253, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:21:07,850 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:21:07,851 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49156, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:21:07,852 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T11:21:07,853 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40214, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T11:21:07,855 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x32c12a30 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79b10416 2024-11-20T11:21:07,858 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7177efc9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:21:07,858 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5ef40578 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2f142b04 2024-11-20T11:21:07,861 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61d38088, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:21:07,862 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x032bb71c to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@de9f076 2024-11-20T11:21:07,864 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7043f683, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:21:07,865 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06bc0f7c to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4414259d 2024-11-20T11:21:07,867 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b0c2472, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:21:07,868 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b8b6e04 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ed69825 2024-11-20T11:21:07,871 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34b30c39, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:21:07,872 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11193a0c to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d672ed2 2024-11-20T11:21:07,874 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f7c40ba, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:21:07,875 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7861b162 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7cf40102 2024-11-20T11:21:07,878 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41b0e7b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:21:07,878 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x154f0f85 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@496fe03f 2024-11-20T11:21:07,881 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f2423f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:21:07,882 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x008a917b to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3652e74d 2024-11-20T11:21:07,885 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@184771cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:21:07,885 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x054c2725 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2405c04e 2024-11-20T11:21:07,888 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76f0408, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:21:07,891 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:21:07,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-11-20T11:21:07,892 DEBUG [hconnection-0x5c8afdc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:21:07,892 DEBUG [hconnection-0x5f949549-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:21:07,893 DEBUG [hconnection-0x2019d83b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:21:07,893 DEBUG 
[hconnection-0x1f6063a7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:21:07,893 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:21:07,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T11:21:07,893 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:21:07,894 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:21:07,894 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49160, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:21:07,894 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49164, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:21:07,894 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49180, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:21:07,894 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49184, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:21:07,896 DEBUG [hconnection-0x316d01f7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:21:07,896 DEBUG [hconnection-0x52c4a287-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:21:07,897 DEBUG [hconnection-0x4f2d7256-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:21:07,897 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49196, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:21:07,897 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49210, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:21:07,898 DEBUG [hconnection-0x667a6c66-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:21:07,898 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49212, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:21:07,898 DEBUG [hconnection-0x4a465842-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:21:07,899 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49218, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-11-20T11:21:07,901 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49220, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:21:07,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:07,902 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2bb1124baa057df845a5f13f3b500be1 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T11:21:07,903 DEBUG [hconnection-0x622474e5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:21:07,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=A 2024-11-20T11:21:07,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:07,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=B 2024-11-20T11:21:07,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:07,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=C 2024-11-20T11:21:07,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:07,903 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49224, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:21:07,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:07,921 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:07,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101727920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:07,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101727919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:07,921 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:07,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101727921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:07,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:07,921 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:07,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101727921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:07,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101727921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:07,924 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/496592e6d57145319e43309bb60d2c79 is 50, key is test_row_0/A:col10/1732101667902/Put/seqid=0 2024-11-20T11:21:07,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742221_1397 (size=12001) 2024-11-20T11:21:07,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T11:21:08,024 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:08,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101728022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:08,024 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:08,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101728022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:08,025 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:08,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101728022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:08,025 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:08,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101728022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:08,025 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:08,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101728022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:08,045 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:08,045 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T11:21:08,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:08,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:08,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:08,046 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:08,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:08,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:08,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T11:21:08,198 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:08,198 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T11:21:08,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:08,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:08,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:08,198 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:08,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:08,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:08,228 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:08,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101728225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:08,228 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:08,228 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:08,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101728226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:08,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101728225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:08,228 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:08,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101728226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:08,230 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:08,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101728227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:08,333 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/496592e6d57145319e43309bb60d2c79 2024-11-20T11:21:08,350 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:08,351 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T11:21:08,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:08,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:08,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
2024-11-20T11:21:08,351 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:08,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:08,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:08,355 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/8ebab740165545bcb1f43949b9b403d6 is 50, key is test_row_0/B:col10/1732101667902/Put/seqid=0 2024-11-20T11:21:08,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742222_1398 (size=12001) 2024-11-20T11:21:08,359 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/8ebab740165545bcb1f43949b9b403d6 2024-11-20T11:21:08,379 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/ff99e61c52434257a548e4cce19e9c71 is 50, key is test_row_0/C:col10/1732101667902/Put/seqid=0 2024-11-20T11:21:08,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742223_1399 (size=12001) 2024-11-20T11:21:08,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T11:21:08,503 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:08,503 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T11:21:08,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:08,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:08,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:08,504 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:08,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:08,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:08,531 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:08,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101728530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:08,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:08,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101728530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:08,533 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:08,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101728531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:08,533 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:08,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101728531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:08,536 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:08,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101728533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:08,656 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:08,656 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T11:21:08,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:08,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:08,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:08,657 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:08,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:08,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:08,783 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/ff99e61c52434257a548e4cce19e9c71 2024-11-20T11:21:08,788 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/496592e6d57145319e43309bb60d2c79 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/496592e6d57145319e43309bb60d2c79 2024-11-20T11:21:08,791 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/496592e6d57145319e43309bb60d2c79, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T11:21:08,792 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/8ebab740165545bcb1f43949b9b403d6 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/8ebab740165545bcb1f43949b9b403d6 2024-11-20T11:21:08,795 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/8ebab740165545bcb1f43949b9b403d6, entries=150, sequenceid=15, 
filesize=11.7 K 2024-11-20T11:21:08,796 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/ff99e61c52434257a548e4cce19e9c71 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/ff99e61c52434257a548e4cce19e9c71 2024-11-20T11:21:08,799 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/ff99e61c52434257a548e4cce19e9c71, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T11:21:08,800 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 2bb1124baa057df845a5f13f3b500be1 in 898ms, sequenceid=15, compaction requested=false 2024-11-20T11:21:08,800 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:08,808 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:08,809 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T11:21:08,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
2024-11-20T11:21:08,809 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing 2bb1124baa057df845a5f13f3b500be1 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T11:21:08,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=A 2024-11-20T11:21:08,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:08,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=B 2024-11-20T11:21:08,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:08,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=C 2024-11-20T11:21:08,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:08,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/adf8b4a1eed547c1914af0045b692e79 is 50, key is test_row_0/A:col10/1732101667919/Put/seqid=0 2024-11-20T11:21:08,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742224_1400 (size=12001) 2024-11-20T11:21:08,817 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/adf8b4a1eed547c1914af0045b692e79 2024-11-20T11:21:08,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/c863a86bf56b4999b145bbadc84cd402 is 50, key is test_row_0/B:col10/1732101667919/Put/seqid=0 2024-11-20T11:21:08,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742225_1401 (size=12001) 2024-11-20T11:21:08,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T11:21:09,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:09,037 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:09,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:09,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101729070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:09,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:09,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101729070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:09,076 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:09,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101729070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:09,076 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:09,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101729071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:09,079 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:09,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101729075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:09,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:09,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101729176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:09,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:09,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101729176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:09,179 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:09,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:09,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101729176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:09,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101729177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:09,183 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:09,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101729180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:09,228 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/c863a86bf56b4999b145bbadc84cd402 2024-11-20T11:21:09,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/1ba4a825944c45d4bb9588c52d309067 is 50, key is test_row_0/C:col10/1732101667919/Put/seqid=0 2024-11-20T11:21:09,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742226_1402 (size=12001) 2024-11-20T11:21:09,383 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:09,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101729380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:09,383 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:09,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101729381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:09,383 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:09,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101729381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:09,384 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:09,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101729381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:09,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:09,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101729385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:09,638 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/1ba4a825944c45d4bb9588c52d309067 2024-11-20T11:21:09,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/adf8b4a1eed547c1914af0045b692e79 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/adf8b4a1eed547c1914af0045b692e79 2024-11-20T11:21:09,646 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/adf8b4a1eed547c1914af0045b692e79, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T11:21:09,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/c863a86bf56b4999b145bbadc84cd402 as 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/c863a86bf56b4999b145bbadc84cd402 2024-11-20T11:21:09,650 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/c863a86bf56b4999b145bbadc84cd402, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T11:21:09,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/1ba4a825944c45d4bb9588c52d309067 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/1ba4a825944c45d4bb9588c52d309067 2024-11-20T11:21:09,654 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/1ba4a825944c45d4bb9588c52d309067, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T11:21:09,655 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 2bb1124baa057df845a5f13f3b500be1 in 846ms, sequenceid=37, compaction requested=false 2024-11-20T11:21:09,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:09,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
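[Editor's note] The entries above show the flush of region 2bb1124baa057df845a5f13f3b500be1 completing (three store files committed at sequenceid=37) while concurrent Mutate RPCs are rejected with RegionTooBusyException because the memstore is over its blocking limit. As a minimal, illustrative sketch only (not part of this log or of the test code), the snippet below shows how a client using the standard HBase API could retry a Put when that exception surfaces. The table name, row key, and column come from the log; the retry count and backoff values are assumptions, and in practice the HBase client normally retries RegionTooBusyException internally before it ever reaches application code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;                      // illustrative starting backoff
            for (int attempt = 1; attempt <= 5; attempt++) {   // retry budget is an assumption
                try {
                    table.put(put);                    // may be rejected while the memstore is over its blocking limit
                    break;
                } catch (RegionTooBusyException e) {
                    // The region server refused the write ("Over memstore limit=512.0 K" in this
                    // run); back off and let the in-progress flush drain the memstore.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;                    // simple exponential backoff
                }
            }
        }
    }
}
```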
2024-11-20T11:21:09,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-11-20T11:21:09,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-11-20T11:21:09,658 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-11-20T11:21:09,658 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7630 sec 2024-11-20T11:21:09,659 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 1.7670 sec 2024-11-20T11:21:09,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:09,688 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2bb1124baa057df845a5f13f3b500be1 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T11:21:09,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=A 2024-11-20T11:21:09,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:09,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=B 2024-11-20T11:21:09,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:09,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=C 2024-11-20T11:21:09,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:09,693 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/48c5613d68c143d684163a45847e115a is 50, key is test_row_0/A:col10/1732101669069/Put/seqid=0 2024-11-20T11:21:09,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742227_1403 (size=16681) 2024-11-20T11:21:09,711 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:09,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101729705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:09,711 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:09,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101729706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:09,711 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:09,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101729706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:09,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:09,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101729708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:09,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:09,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101729711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:09,815 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:09,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101729812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:09,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:09,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101729812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:09,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:09,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101729812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:09,816 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T11:21:09,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:09,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101729815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:09,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:09,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101729815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:09,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T11:21:09,997 INFO [Thread-1798 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-11-20T11:21:09,999 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:21:09,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-11-20T11:21:10,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T11:21:10,000 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:21:10,001 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:21:10,001 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:21:10,019 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:10,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101730016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:10,019 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:10,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101730017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:10,019 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:10,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101730017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:10,019 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:10,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101730017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:10,021 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:10,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101730018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:10,098 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/48c5613d68c143d684163a45847e115a 2024-11-20T11:21:10,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T11:21:10,104 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/9cd36941004740dc83d0438a59d13667 is 50, key is test_row_0/B:col10/1732101669069/Put/seqid=0 2024-11-20T11:21:10,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742228_1404 (size=12001) 2024-11-20T11:21:10,152 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:10,153 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T11:21:10,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
2024-11-20T11:21:10,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:10,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:10,153 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:10,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:10,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:10,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T11:21:10,305 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:10,306 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T11:21:10,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
2024-11-20T11:21:10,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:10,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:10,306 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:10,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:10,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:10,323 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:10,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101730320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:10,323 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:10,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101730320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:10,324 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:10,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101730320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:10,324 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:10,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101730321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:10,327 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:10,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101730324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:10,458 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:10,459 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T11:21:10,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:10,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:10,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:10,459 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:10,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:10,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:10,508 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/9cd36941004740dc83d0438a59d13667 2024-11-20T11:21:10,515 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/ec21920b37e24af0addedf1085e658cb is 50, key is test_row_0/C:col10/1732101669069/Put/seqid=0 2024-11-20T11:21:10,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742229_1405 (size=12001) 2024-11-20T11:21:10,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T11:21:10,611 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:10,611 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T11:21:10,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:10,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
as already flushing 2024-11-20T11:21:10,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:10,612 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:10,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:10,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:10,763 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:10,764 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T11:21:10,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:10,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:10,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:10,765 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:10,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:10,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:10,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:10,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101730825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:10,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:10,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101730826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:10,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:10,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101730827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:10,831 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:10,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101730827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:10,831 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:10,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101730829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:10,916 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:10,917 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T11:21:10,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:10,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:10,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:10,917 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:10,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:10,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:10,919 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/ec21920b37e24af0addedf1085e658cb 2024-11-20T11:21:10,923 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/48c5613d68c143d684163a45847e115a as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/48c5613d68c143d684163a45847e115a 2024-11-20T11:21:10,926 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/48c5613d68c143d684163a45847e115a, entries=250, sequenceid=53, filesize=16.3 K 2024-11-20T11:21:10,927 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/9cd36941004740dc83d0438a59d13667 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/9cd36941004740dc83d0438a59d13667 2024-11-20T11:21:10,930 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/9cd36941004740dc83d0438a59d13667, entries=150, sequenceid=53, 
filesize=11.7 K 2024-11-20T11:21:10,931 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/ec21920b37e24af0addedf1085e658cb as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/ec21920b37e24af0addedf1085e658cb 2024-11-20T11:21:10,934 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/ec21920b37e24af0addedf1085e658cb, entries=150, sequenceid=53, filesize=11.7 K 2024-11-20T11:21:10,935 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 2bb1124baa057df845a5f13f3b500be1 in 1248ms, sequenceid=53, compaction requested=true 2024-11-20T11:21:10,935 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:10,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2bb1124baa057df845a5f13f3b500be1:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:21:10,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:10,935 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:10,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2bb1124baa057df845a5f13f3b500be1:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:21:10,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:10,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2bb1124baa057df845a5f13f3b500be1:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:21:10,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:10,935 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:10,936 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40683 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:10,936 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:10,936 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 
2bb1124baa057df845a5f13f3b500be1/B is initiating minor compaction (all files) 2024-11-20T11:21:10,936 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 2bb1124baa057df845a5f13f3b500be1/A is initiating minor compaction (all files) 2024-11-20T11:21:10,936 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2bb1124baa057df845a5f13f3b500be1/B in TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:10,936 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2bb1124baa057df845a5f13f3b500be1/A in TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:10,937 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/496592e6d57145319e43309bb60d2c79, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/adf8b4a1eed547c1914af0045b692e79, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/48c5613d68c143d684163a45847e115a] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp, totalSize=39.7 K 2024-11-20T11:21:10,937 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/8ebab740165545bcb1f43949b9b403d6, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/c863a86bf56b4999b145bbadc84cd402, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/9cd36941004740dc83d0438a59d13667] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp, totalSize=35.2 K 2024-11-20T11:21:10,937 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ebab740165545bcb1f43949b9b403d6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732101667901 2024-11-20T11:21:10,937 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 496592e6d57145319e43309bb60d2c79, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732101667901 2024-11-20T11:21:10,937 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting adf8b4a1eed547c1914af0045b692e79, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732101667919 2024-11-20T11:21:10,937 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting c863a86bf56b4999b145bbadc84cd402, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732101667919 2024-11-20T11:21:10,938 DEBUG 
[RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 9cd36941004740dc83d0438a59d13667, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732101669069 2024-11-20T11:21:10,938 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 48c5613d68c143d684163a45847e115a, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732101669042 2024-11-20T11:21:10,944 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2bb1124baa057df845a5f13f3b500be1#B#compaction#336 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:10,944 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/ab5a84b814c6406cbd610cb2311b4546 is 50, key is test_row_0/B:col10/1732101669069/Put/seqid=0 2024-11-20T11:21:10,945 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2bb1124baa057df845a5f13f3b500be1#A#compaction#337 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:10,945 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/ec3e47f3c6e04a06858b5b6e4824efc2 is 50, key is test_row_0/A:col10/1732101669069/Put/seqid=0 2024-11-20T11:21:10,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742230_1406 (size=12104) 2024-11-20T11:21:10,954 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/ab5a84b814c6406cbd610cb2311b4546 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/ab5a84b814c6406cbd610cb2311b4546 2024-11-20T11:21:10,958 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2bb1124baa057df845a5f13f3b500be1/B of 2bb1124baa057df845a5f13f3b500be1 into ab5a84b814c6406cbd610cb2311b4546(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:21:10,958 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:10,958 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1., storeName=2bb1124baa057df845a5f13f3b500be1/B, priority=13, startTime=1732101670935; duration=0sec 2024-11-20T11:21:10,958 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:10,958 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2bb1124baa057df845a5f13f3b500be1:B 2024-11-20T11:21:10,958 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:10,959 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:10,959 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 2bb1124baa057df845a5f13f3b500be1/C is initiating minor compaction (all files) 2024-11-20T11:21:10,959 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2bb1124baa057df845a5f13f3b500be1/C in TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:10,959 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/ff99e61c52434257a548e4cce19e9c71, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/1ba4a825944c45d4bb9588c52d309067, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/ec21920b37e24af0addedf1085e658cb] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp, totalSize=35.2 K 2024-11-20T11:21:10,960 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting ff99e61c52434257a548e4cce19e9c71, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732101667901 2024-11-20T11:21:10,960 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 1ba4a825944c45d4bb9588c52d309067, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732101667919 2024-11-20T11:21:10,960 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting ec21920b37e24af0addedf1085e658cb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732101669069 2024-11-20T11:21:10,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is 
added to blk_1073742231_1407 (size=12104) 2024-11-20T11:21:10,974 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2bb1124baa057df845a5f13f3b500be1#C#compaction#338 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:10,974 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/3df81d2a2f874f04bc07baa4611422d6 is 50, key is test_row_0/C:col10/1732101669069/Put/seqid=0 2024-11-20T11:21:10,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742232_1408 (size=12104) 2024-11-20T11:21:11,069 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:11,070 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T11:21:11,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:11,070 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing 2bb1124baa057df845a5f13f3b500be1 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-20T11:21:11,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=A 2024-11-20T11:21:11,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:11,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=B 2024-11-20T11:21:11,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:11,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=C 2024-11-20T11:21:11,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:11,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/8a1df6cc68874e97abbb97e78acbe3cd is 50, key is test_row_0/A:col10/1732101669704/Put/seqid=0 2024-11-20T11:21:11,080 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742233_1409 (size=12001) 2024-11-20T11:21:11,081 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/8a1df6cc68874e97abbb97e78acbe3cd 2024-11-20T11:21:11,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/b66056d6716c487190fabaeb7536c952 is 50, key is test_row_0/B:col10/1732101669704/Put/seqid=0 2024-11-20T11:21:11,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742234_1410 (size=12001) 2024-11-20T11:21:11,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T11:21:11,372 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/ec3e47f3c6e04a06858b5b6e4824efc2 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/ec3e47f3c6e04a06858b5b6e4824efc2 2024-11-20T11:21:11,381 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2bb1124baa057df845a5f13f3b500be1/A of 2bb1124baa057df845a5f13f3b500be1 into ec3e47f3c6e04a06858b5b6e4824efc2(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:21:11,381 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:11,381 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1., storeName=2bb1124baa057df845a5f13f3b500be1/A, priority=13, startTime=1732101670935; duration=0sec 2024-11-20T11:21:11,381 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:11,381 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2bb1124baa057df845a5f13f3b500be1:A 2024-11-20T11:21:11,390 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/3df81d2a2f874f04bc07baa4611422d6 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/3df81d2a2f874f04bc07baa4611422d6 2024-11-20T11:21:11,394 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2bb1124baa057df845a5f13f3b500be1/C of 2bb1124baa057df845a5f13f3b500be1 into 3df81d2a2f874f04bc07baa4611422d6(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:21:11,394 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:11,394 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1., storeName=2bb1124baa057df845a5f13f3b500be1/C, priority=13, startTime=1732101670935; duration=0sec 2024-11-20T11:21:11,394 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:11,394 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2bb1124baa057df845a5f13f3b500be1:C 2024-11-20T11:21:11,493 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/b66056d6716c487190fabaeb7536c952 2024-11-20T11:21:11,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/ccd2ca8a12274fa3a4fa0c643d367628 is 50, key is test_row_0/C:col10/1732101669704/Put/seqid=0 2024-11-20T11:21:11,502 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742235_1411 (size=12001) 2024-11-20T11:21:11,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:11,837 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:11,852 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:11,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101731845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:11,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:11,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101731846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:11,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:11,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:11,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101731846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:11,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101731847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:11,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:11,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101731849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:11,903 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/ccd2ca8a12274fa3a4fa0c643d367628 2024-11-20T11:21:11,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/8a1df6cc68874e97abbb97e78acbe3cd as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/8a1df6cc68874e97abbb97e78acbe3cd 2024-11-20T11:21:11,913 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/8a1df6cc68874e97abbb97e78acbe3cd, entries=150, sequenceid=74, filesize=11.7 K 2024-11-20T11:21:11,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/b66056d6716c487190fabaeb7536c952 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/b66056d6716c487190fabaeb7536c952 2024-11-20T11:21:11,918 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/b66056d6716c487190fabaeb7536c952, entries=150, sequenceid=74, filesize=11.7 K 2024-11-20T11:21:11,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/ccd2ca8a12274fa3a4fa0c643d367628 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/ccd2ca8a12274fa3a4fa0c643d367628 2024-11-20T11:21:11,922 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/ccd2ca8a12274fa3a4fa0c643d367628, entries=150, sequenceid=74, filesize=11.7 K 2024-11-20T11:21:11,923 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 2bb1124baa057df845a5f13f3b500be1 in 853ms, sequenceid=74, compaction requested=false 2024-11-20T11:21:11,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:11,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:11,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-11-20T11:21:11,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-11-20T11:21:11,925 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-11-20T11:21:11,925 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9230 sec 2024-11-20T11:21:11,927 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 1.9270 sec 2024-11-20T11:21:11,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:11,958 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2bb1124baa057df845a5f13f3b500be1 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-20T11:21:11,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=A 2024-11-20T11:21:11,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:11,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=B 2024-11-20T11:21:11,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:11,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
2bb1124baa057df845a5f13f3b500be1, store=C 2024-11-20T11:21:11,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:11,963 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/603ec13e8d1d45b4a18ed2f9e8345e9a is 50, key is test_row_0/A:col10/1732101671844/Put/seqid=0 2024-11-20T11:21:11,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742236_1412 (size=12001) 2024-11-20T11:21:11,969 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/603ec13e8d1d45b4a18ed2f9e8345e9a 2024-11-20T11:21:11,977 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/65da86d2427a48b0b0a25acf9758b97f is 50, key is test_row_0/B:col10/1732101671844/Put/seqid=0 2024-11-20T11:21:11,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742237_1413 (size=12001) 2024-11-20T11:21:11,986 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/65da86d2427a48b0b0a25acf9758b97f 2024-11-20T11:21:11,987 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:11,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101731980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:11,987 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:11,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101731981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:11,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:11,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101731981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:11,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:11,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101731981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:11,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:11,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101731987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:11,995 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/9b8c1be031834f38a6dbd230f9744a42 is 50, key is test_row_0/C:col10/1732101671844/Put/seqid=0 2024-11-20T11:21:12,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742238_1414 (size=12001) 2024-11-20T11:21:12,093 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:12,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101732088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:12,093 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:12,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101732088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:12,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:12,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101732089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:12,094 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:12,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101732089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:12,098 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:12,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101732094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:12,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T11:21:12,104 INFO [Thread-1798 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-11-20T11:21:12,105 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:21:12,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-11-20T11:21:12,106 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:21:12,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T11:21:12,107 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:21:12,107 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:21:12,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see 
if procedure is done pid=136 2024-11-20T11:21:12,258 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:12,259 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-20T11:21:12,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:12,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:12,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:12,259 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:12,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:12,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:12,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:12,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101732294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:12,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:12,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101732294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:12,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:12,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101732295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:12,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:12,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101732296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:12,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:12,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101732300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:12,401 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/9b8c1be031834f38a6dbd230f9744a42 2024-11-20T11:21:12,405 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/603ec13e8d1d45b4a18ed2f9e8345e9a as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/603ec13e8d1d45b4a18ed2f9e8345e9a 2024-11-20T11:21:12,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T11:21:12,409 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/603ec13e8d1d45b4a18ed2f9e8345e9a, entries=150, sequenceid=94, filesize=11.7 K 2024-11-20T11:21:12,410 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/65da86d2427a48b0b0a25acf9758b97f as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/65da86d2427a48b0b0a25acf9758b97f 2024-11-20T11:21:12,411 DEBUG 
[RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:12,412 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-20T11:21:12,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:12,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:12,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:12,412 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:12,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:12,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:12,413 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/65da86d2427a48b0b0a25acf9758b97f, entries=150, sequenceid=94, filesize=11.7 K 2024-11-20T11:21:12,414 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/9b8c1be031834f38a6dbd230f9744a42 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/9b8c1be031834f38a6dbd230f9744a42 2024-11-20T11:21:12,417 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/9b8c1be031834f38a6dbd230f9744a42, entries=150, sequenceid=94, filesize=11.7 K 2024-11-20T11:21:12,418 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 2bb1124baa057df845a5f13f3b500be1 in 461ms, sequenceid=94, compaction requested=true 2024-11-20T11:21:12,418 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:12,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2bb1124baa057df845a5f13f3b500be1:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:21:12,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:12,418 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:12,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2bb1124baa057df845a5f13f3b500be1:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:21:12,418 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:12,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:12,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2bb1124baa057df845a5f13f3b500be1:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:21:12,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:12,419 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:12,419 DEBUG 
[RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:12,419 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 2bb1124baa057df845a5f13f3b500be1/A is initiating minor compaction (all files) 2024-11-20T11:21:12,419 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 2bb1124baa057df845a5f13f3b500be1/B is initiating minor compaction (all files) 2024-11-20T11:21:12,419 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2bb1124baa057df845a5f13f3b500be1/A in TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:12,419 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2bb1124baa057df845a5f13f3b500be1/B in TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:12,419 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/ab5a84b814c6406cbd610cb2311b4546, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/b66056d6716c487190fabaeb7536c952, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/65da86d2427a48b0b0a25acf9758b97f] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp, totalSize=35.3 K 2024-11-20T11:21:12,419 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/ec3e47f3c6e04a06858b5b6e4824efc2, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/8a1df6cc68874e97abbb97e78acbe3cd, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/603ec13e8d1d45b4a18ed2f9e8345e9a] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp, totalSize=35.3 K 2024-11-20T11:21:12,420 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting ab5a84b814c6406cbd610cb2311b4546, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732101669069 2024-11-20T11:21:12,420 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting ec3e47f3c6e04a06858b5b6e4824efc2, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732101669069 2024-11-20T11:21:12,420 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting b66056d6716c487190fabaeb7536c952, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, 
earliestPutTs=1732101669704 2024-11-20T11:21:12,420 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8a1df6cc68874e97abbb97e78acbe3cd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1732101669704 2024-11-20T11:21:12,420 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 65da86d2427a48b0b0a25acf9758b97f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732101671844 2024-11-20T11:21:12,421 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 603ec13e8d1d45b4a18ed2f9e8345e9a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732101671844 2024-11-20T11:21:12,431 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2bb1124baa057df845a5f13f3b500be1#B#compaction#345 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:12,431 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2bb1124baa057df845a5f13f3b500be1#A#compaction#346 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:12,432 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/41e4caec17f34246a1a6d84daa64a9a4 is 50, key is test_row_0/B:col10/1732101671844/Put/seqid=0 2024-11-20T11:21:12,432 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/541e3cd8d6a542789185da89fbea7783 is 50, key is test_row_0/A:col10/1732101671844/Put/seqid=0 2024-11-20T11:21:12,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742240_1416 (size=12207) 2024-11-20T11:21:12,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742239_1415 (size=12207) 2024-11-20T11:21:12,564 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:12,564 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-20T11:21:12,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
2024-11-20T11:21:12,565 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing 2bb1124baa057df845a5f13f3b500be1 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-20T11:21:12,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=A 2024-11-20T11:21:12,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:12,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=B 2024-11-20T11:21:12,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:12,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=C 2024-11-20T11:21:12,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:12,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/95b9dd1be8864c5188df869fce94a203 is 50, key is test_row_0/A:col10/1732101671980/Put/seqid=0 2024-11-20T11:21:12,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742241_1417 (size=12001) 2024-11-20T11:21:12,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:12,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:12,652 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:12,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101732643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:12,652 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:12,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101732644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:12,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:12,653 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:12,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101732645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:12,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101732645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:12,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:12,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101732647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:12,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T11:21:12,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:12,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101732753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:12,757 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:12,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101732753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:12,758 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:12,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101732754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:12,758 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:12,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101732754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:12,759 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:12,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101732754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:12,840 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/41e4caec17f34246a1a6d84daa64a9a4 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/41e4caec17f34246a1a6d84daa64a9a4 2024-11-20T11:21:12,841 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/541e3cd8d6a542789185da89fbea7783 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/541e3cd8d6a542789185da89fbea7783 2024-11-20T11:21:12,845 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2bb1124baa057df845a5f13f3b500be1/B of 2bb1124baa057df845a5f13f3b500be1 into 41e4caec17f34246a1a6d84daa64a9a4(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:21:12,845 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:12,845 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1., storeName=2bb1124baa057df845a5f13f3b500be1/B, priority=13, startTime=1732101672418; duration=0sec 2024-11-20T11:21:12,845 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:12,845 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2bb1124baa057df845a5f13f3b500be1:B 2024-11-20T11:21:12,845 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:12,845 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2bb1124baa057df845a5f13f3b500be1/A of 2bb1124baa057df845a5f13f3b500be1 into 541e3cd8d6a542789185da89fbea7783(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:21:12,846 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:12,846 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1., storeName=2bb1124baa057df845a5f13f3b500be1/A, priority=13, startTime=1732101672418; duration=0sec 2024-11-20T11:21:12,846 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:12,846 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2bb1124baa057df845a5f13f3b500be1:A 2024-11-20T11:21:12,846 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:12,846 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 2bb1124baa057df845a5f13f3b500be1/C is initiating minor compaction (all files) 2024-11-20T11:21:12,846 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2bb1124baa057df845a5f13f3b500be1/C in TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
2024-11-20T11:21:12,847 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/3df81d2a2f874f04bc07baa4611422d6, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/ccd2ca8a12274fa3a4fa0c643d367628, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/9b8c1be031834f38a6dbd230f9744a42] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp, totalSize=35.3 K 2024-11-20T11:21:12,847 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 3df81d2a2f874f04bc07baa4611422d6, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732101669069 2024-11-20T11:21:12,847 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting ccd2ca8a12274fa3a4fa0c643d367628, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1732101669704 2024-11-20T11:21:12,847 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 9b8c1be031834f38a6dbd230f9744a42, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732101671844 2024-11-20T11:21:12,853 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2bb1124baa057df845a5f13f3b500be1#C#compaction#348 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:12,853 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/eb525675cb6c4b7c9cab6b040065edfa is 50, key is test_row_0/C:col10/1732101671844/Put/seqid=0 2024-11-20T11:21:12,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742242_1418 (size=12207) 2024-11-20T11:21:12,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:12,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101732956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:12,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:12,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101732959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:12,963 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:12,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101732959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:12,964 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:12,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101732960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:12,964 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:12,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101732960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:12,974 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=112 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/95b9dd1be8864c5188df869fce94a203 2024-11-20T11:21:12,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/cd4bcc7bfcc248d686ad8eed61bfc9ae is 50, key is test_row_0/B:col10/1732101671980/Put/seqid=0 2024-11-20T11:21:12,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742243_1419 (size=12001) 2024-11-20T11:21:13,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T11:21:13,261 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/eb525675cb6c4b7c9cab6b040065edfa as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/eb525675cb6c4b7c9cab6b040065edfa 2024-11-20T11:21:13,266 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 
2bb1124baa057df845a5f13f3b500be1/C of 2bb1124baa057df845a5f13f3b500be1 into eb525675cb6c4b7c9cab6b040065edfa(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:21:13,266 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:13,266 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1., storeName=2bb1124baa057df845a5f13f3b500be1/C, priority=13, startTime=1732101672418; duration=0sec 2024-11-20T11:21:13,266 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:13,266 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2bb1124baa057df845a5f13f3b500be1:C 2024-11-20T11:21:13,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:13,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101733264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:13,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:13,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101733264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:13,269 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:13,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101733265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:13,270 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:13,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101733266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:13,270 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:13,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101733267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:13,385 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=112 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/cd4bcc7bfcc248d686ad8eed61bfc9ae 2024-11-20T11:21:13,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/c38d333c83a74f0f846d90f00970448d is 50, key is test_row_0/C:col10/1732101671980/Put/seqid=0 2024-11-20T11:21:13,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742244_1420 (size=12001) 2024-11-20T11:21:13,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:13,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101733769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:13,773 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:13,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101733770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:13,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:13,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101733771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:13,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:13,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101733772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:13,776 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:13,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101733773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:13,797 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=112 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/c38d333c83a74f0f846d90f00970448d 2024-11-20T11:21:13,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/95b9dd1be8864c5188df869fce94a203 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/95b9dd1be8864c5188df869fce94a203 2024-11-20T11:21:13,806 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/95b9dd1be8864c5188df869fce94a203, entries=150, sequenceid=112, filesize=11.7 K 2024-11-20T11:21:13,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/cd4bcc7bfcc248d686ad8eed61bfc9ae as 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/cd4bcc7bfcc248d686ad8eed61bfc9ae 2024-11-20T11:21:13,811 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/cd4bcc7bfcc248d686ad8eed61bfc9ae, entries=150, sequenceid=112, filesize=11.7 K 2024-11-20T11:21:13,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/c38d333c83a74f0f846d90f00970448d as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/c38d333c83a74f0f846d90f00970448d 2024-11-20T11:21:13,815 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/c38d333c83a74f0f846d90f00970448d, entries=150, sequenceid=112, filesize=11.7 K 2024-11-20T11:21:13,816 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 2bb1124baa057df845a5f13f3b500be1 in 1251ms, sequenceid=112, compaction requested=false 2024-11-20T11:21:13,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:13,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
2024-11-20T11:21:13,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-20T11:21:13,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-11-20T11:21:13,818 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-11-20T11:21:13,818 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7100 sec 2024-11-20T11:21:13,820 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 1.7140 sec 2024-11-20T11:21:14,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T11:21:14,210 INFO [Thread-1798 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-11-20T11:21:14,211 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:21:14,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-11-20T11:21:14,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T11:21:14,213 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:21:14,214 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:21:14,214 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:21:14,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T11:21:14,365 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:14,365 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T11:21:14,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
2024-11-20T11:21:14,366 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing 2bb1124baa057df845a5f13f3b500be1 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-20T11:21:14,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=A 2024-11-20T11:21:14,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:14,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=B 2024-11-20T11:21:14,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:14,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=C 2024-11-20T11:21:14,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:14,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/44756857799642ca9f78d1a4f94c1234 is 50, key is test_row_0/A:col10/1732101672644/Put/seqid=0 2024-11-20T11:21:14,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742245_1421 (size=12051) 2024-11-20T11:21:14,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T11:21:14,775 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/44756857799642ca9f78d1a4f94c1234 2024-11-20T11:21:14,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:14,778 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
as already flushing 2024-11-20T11:21:14,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/2793d9b5840c4f82b1bbde0d68baac0c is 50, key is test_row_0/B:col10/1732101672644/Put/seqid=0 2024-11-20T11:21:14,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742246_1422 (size=12051) 2024-11-20T11:21:14,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:14,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101734789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:14,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:14,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101734790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:14,797 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:14,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101734792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:14,797 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:14,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101734793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:14,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:14,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101734795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:14,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T11:21:14,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:14,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101734896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:14,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:14,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101734896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:14,901 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:14,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101734898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:14,902 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:14,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101734898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:14,902 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:14,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101734899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:15,099 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:15,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101735098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:15,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:15,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101735099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:15,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:15,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101735102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:15,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:15,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101735103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:15,107 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:15,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101735104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:15,187 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/2793d9b5840c4f82b1bbde0d68baac0c 2024-11-20T11:21:15,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/6b55cbccf00146419268b5548015e3c0 is 50, key is test_row_0/C:col10/1732101672644/Put/seqid=0 2024-11-20T11:21:15,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742247_1423 (size=12051) 2024-11-20T11:21:15,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T11:21:15,404 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:15,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101735401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:15,409 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:15,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101735406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:15,409 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:15,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101735406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:15,409 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:15,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101735406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:15,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:15,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101735408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:15,598 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/6b55cbccf00146419268b5548015e3c0 2024-11-20T11:21:15,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/44756857799642ca9f78d1a4f94c1234 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/44756857799642ca9f78d1a4f94c1234 2024-11-20T11:21:15,606 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/44756857799642ca9f78d1a4f94c1234, entries=150, sequenceid=133, filesize=11.8 K 2024-11-20T11:21:15,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/2793d9b5840c4f82b1bbde0d68baac0c as 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/2793d9b5840c4f82b1bbde0d68baac0c 2024-11-20T11:21:15,611 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/2793d9b5840c4f82b1bbde0d68baac0c, entries=150, sequenceid=133, filesize=11.8 K 2024-11-20T11:21:15,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/6b55cbccf00146419268b5548015e3c0 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/6b55cbccf00146419268b5548015e3c0 2024-11-20T11:21:15,615 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/6b55cbccf00146419268b5548015e3c0, entries=150, sequenceid=133, filesize=11.8 K 2024-11-20T11:21:15,616 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 2bb1124baa057df845a5f13f3b500be1 in 1250ms, sequenceid=133, compaction requested=true 2024-11-20T11:21:15,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:15,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
2024-11-20T11:21:15,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139
2024-11-20T11:21:15,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=139
2024-11-20T11:21:15,618 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138
2024-11-20T11:21:15,618 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4030 sec
2024-11-20T11:21:15,620 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 1.4070 sec
2024-11-20T11:21:15,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2bb1124baa057df845a5f13f3b500be1
2024-11-20T11:21:15,911 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2bb1124baa057df845a5f13f3b500be1 3/3 column families, dataSize=107.34 KB heapSize=282 KB
2024-11-20T11:21:15,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=A
2024-11-20T11:21:15,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T11:21:15,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=B
2024-11-20T11:21:15,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T11:21:15,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=C
2024-11-20T11:21:15,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T11:21:15,916 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/1cef876d42c341d097d290c22a267aa3 is 50, key is test_row_0/A:col10/1732101674794/Put/seqid=0
2024-11-20T11:21:15,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742248_1424 (size=14541)
2024-11-20T11:21:15,937 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:15,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101735928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:15,941 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:15,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101735935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:15,945 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:15,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101735936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:15,945 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:15,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101735936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:15,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:15,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101735937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:16,041 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:16,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101736038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:16,046 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:16,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101736042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:16,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:16,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101736046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:16,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:16,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101736046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:16,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:16,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101736046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:16,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:16,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101736242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:16,252 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:16,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101736247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:16,253 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:16,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101736249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:16,253 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:16,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101736250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:16,253 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:16,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101736252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:16,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T11:21:16,317 INFO [Thread-1798 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-11-20T11:21:16,318 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:21:16,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-11-20T11:21:16,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T11:21:16,319 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:21:16,320 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:21:16,320 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:21:16,320 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at 
sequenceid=152 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/1cef876d42c341d097d290c22a267aa3 2024-11-20T11:21:16,327 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/115e41efb7564ee3a3bd093ece305e24 is 50, key is test_row_0/B:col10/1732101674794/Put/seqid=0 2024-11-20T11:21:16,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742249_1425 (size=12151) 2024-11-20T11:21:16,330 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/115e41efb7564ee3a3bd093ece305e24 2024-11-20T11:21:16,337 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/4c2b2ff5b72e489e9242c5788209c0e7 is 50, key is test_row_0/C:col10/1732101674794/Put/seqid=0 2024-11-20T11:21:16,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742250_1426 (size=12151) 2024-11-20T11:21:16,416 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T11:21:16,416 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-20T11:21:16,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T11:21:16,472 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:16,472 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T11:21:16,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:16,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:16,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
2024-11-20T11:21:16,472 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:16,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:16,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:16,548 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:16,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101736545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:16,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:16,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101736554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:16,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:16,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101736555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:16,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:16,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101736555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:16,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:16,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101736556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:16,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T11:21:16,624 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:16,625 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T11:21:16,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:16,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:16,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:16,625 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:16,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:16,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:16,741 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/4c2b2ff5b72e489e9242c5788209c0e7 2024-11-20T11:21:16,745 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/1cef876d42c341d097d290c22a267aa3 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/1cef876d42c341d097d290c22a267aa3 2024-11-20T11:21:16,748 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/1cef876d42c341d097d290c22a267aa3, entries=200, sequenceid=152, filesize=14.2 K 2024-11-20T11:21:16,749 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/115e41efb7564ee3a3bd093ece305e24 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/115e41efb7564ee3a3bd093ece305e24 2024-11-20T11:21:16,753 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/115e41efb7564ee3a3bd093ece305e24, entries=150, 
sequenceid=152, filesize=11.9 K 2024-11-20T11:21:16,754 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/4c2b2ff5b72e489e9242c5788209c0e7 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/4c2b2ff5b72e489e9242c5788209c0e7 2024-11-20T11:21:16,757 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/4c2b2ff5b72e489e9242c5788209c0e7, entries=150, sequenceid=152, filesize=11.9 K 2024-11-20T11:21:16,757 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 2bb1124baa057df845a5f13f3b500be1 in 846ms, sequenceid=152, compaction requested=true 2024-11-20T11:21:16,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:16,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2bb1124baa057df845a5f13f3b500be1:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:21:16,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:16,758 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T11:21:16,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2bb1124baa057df845a5f13f3b500be1:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:21:16,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:16,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2bb1124baa057df845a5f13f3b500be1:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:21:16,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:16,758 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T11:21:16,760 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50800 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T11:21:16,760 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48410 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T11:21:16,760 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] 
regionserver.HStore(1540): 2bb1124baa057df845a5f13f3b500be1/A is initiating minor compaction (all files) 2024-11-20T11:21:16,760 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 2bb1124baa057df845a5f13f3b500be1/B is initiating minor compaction (all files) 2024-11-20T11:21:16,760 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2bb1124baa057df845a5f13f3b500be1/B in TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:16,760 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2bb1124baa057df845a5f13f3b500be1/A in TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:16,760 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/41e4caec17f34246a1a6d84daa64a9a4, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/cd4bcc7bfcc248d686ad8eed61bfc9ae, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/2793d9b5840c4f82b1bbde0d68baac0c, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/115e41efb7564ee3a3bd093ece305e24] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp, totalSize=47.3 K 2024-11-20T11:21:16,760 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/541e3cd8d6a542789185da89fbea7783, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/95b9dd1be8864c5188df869fce94a203, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/44756857799642ca9f78d1a4f94c1234, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/1cef876d42c341d097d290c22a267aa3] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp, totalSize=49.6 K 2024-11-20T11:21:16,760 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 541e3cd8d6a542789185da89fbea7783, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732101671844 2024-11-20T11:21:16,760 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 41e4caec17f34246a1a6d84daa64a9a4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732101671844 2024-11-20T11:21:16,761 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting cd4bcc7bfcc248d686ad8eed61bfc9ae, keycount=150, bloomtype=ROW, 
size=11.7 K, encoding=NONE, compression=NONE, seqNum=112, earliestPutTs=1732101671979 2024-11-20T11:21:16,761 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95b9dd1be8864c5188df869fce94a203, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=112, earliestPutTs=1732101671979 2024-11-20T11:21:16,762 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 2793d9b5840c4f82b1bbde0d68baac0c, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732101672642 2024-11-20T11:21:16,762 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 44756857799642ca9f78d1a4f94c1234, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732101672642 2024-11-20T11:21:16,762 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 115e41efb7564ee3a3bd093ece305e24, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732101674783 2024-11-20T11:21:16,762 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1cef876d42c341d097d290c22a267aa3, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732101674783 2024-11-20T11:21:16,770 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2bb1124baa057df845a5f13f3b500be1#B#compaction#357 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:16,770 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/bc2526279aad44f099bbf74539554994 is 50, key is test_row_0/B:col10/1732101674794/Put/seqid=0 2024-11-20T11:21:16,770 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2bb1124baa057df845a5f13f3b500be1#A#compaction#358 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:16,771 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/76a356fac7db4c759e63d3d664aa076f is 50, key is test_row_0/A:col10/1732101674794/Put/seqid=0 2024-11-20T11:21:16,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742251_1427 (size=12493) 2024-11-20T11:21:16,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742252_1428 (size=12493) 2024-11-20T11:21:16,778 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:16,779 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T11:21:16,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:16,779 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing 2bb1124baa057df845a5f13f3b500be1 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-20T11:21:16,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=A 2024-11-20T11:21:16,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:16,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=B 2024-11-20T11:21:16,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:16,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=C 2024-11-20T11:21:16,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:16,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/d32c57774ea742bb9124e086200e71fc is 50, key is test_row_0/A:col10/1732101675934/Put/seqid=0 2024-11-20T11:21:16,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742253_1429 
(size=12151) 2024-11-20T11:21:16,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T11:21:17,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:17,054 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:17,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:17,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101737075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:17,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:17,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101737080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:17,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:17,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101737081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:17,091 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:17,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101737082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:17,092 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:17,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101737082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:17,181 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/bc2526279aad44f099bbf74539554994 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/bc2526279aad44f099bbf74539554994 2024-11-20T11:21:17,182 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/76a356fac7db4c759e63d3d664aa076f as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/76a356fac7db4c759e63d3d664aa076f 2024-11-20T11:21:17,187 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2bb1124baa057df845a5f13f3b500be1/B of 2bb1124baa057df845a5f13f3b500be1 into bc2526279aad44f099bbf74539554994(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:21:17,187 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:17,187 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1., storeName=2bb1124baa057df845a5f13f3b500be1/B, priority=12, startTime=1732101676758; duration=0sec 2024-11-20T11:21:17,187 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:17,188 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2bb1124baa057df845a5f13f3b500be1:B 2024-11-20T11:21:17,188 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T11:21:17,189 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2bb1124baa057df845a5f13f3b500be1/A of 2bb1124baa057df845a5f13f3b500be1 into 76a356fac7db4c759e63d3d664aa076f(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:21:17,189 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/d32c57774ea742bb9124e086200e71fc 2024-11-20T11:21:17,189 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:17,189 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1., storeName=2bb1124baa057df845a5f13f3b500be1/A, priority=12, startTime=1732101676758; duration=0sec 2024-11-20T11:21:17,189 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:17,189 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2bb1124baa057df845a5f13f3b500be1:A 2024-11-20T11:21:17,189 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:17,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101737183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:17,190 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48410 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T11:21:17,190 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:17,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101737187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:17,191 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 2bb1124baa057df845a5f13f3b500be1/C is initiating minor compaction (all files) 2024-11-20T11:21:17,191 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2bb1124baa057df845a5f13f3b500be1/C in TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:17,191 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/eb525675cb6c4b7c9cab6b040065edfa, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/c38d333c83a74f0f846d90f00970448d, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/6b55cbccf00146419268b5548015e3c0, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/4c2b2ff5b72e489e9242c5788209c0e7] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp, totalSize=47.3 K 2024-11-20T11:21:17,192 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting eb525675cb6c4b7c9cab6b040065edfa, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732101671844 2024-11-20T11:21:17,192 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:17,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101737188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:17,192 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting c38d333c83a74f0f846d90f00970448d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=112, earliestPutTs=1732101671979 2024-11-20T11:21:17,193 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b55cbccf00146419268b5548015e3c0, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732101672642 2024-11-20T11:21:17,193 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c2b2ff5b72e489e9242c5788209c0e7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732101674783 2024-11-20T11:21:17,196 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:17,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/52d3674158764b2da6c74645f43b45aa is 50, key is test_row_0/B:col10/1732101675934/Put/seqid=0 2024-11-20T11:21:17,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101737193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:17,197 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:17,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101737193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:17,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742254_1430 (size=12151) 2024-11-20T11:21:17,202 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/52d3674158764b2da6c74645f43b45aa 2024-11-20T11:21:17,204 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2bb1124baa057df845a5f13f3b500be1#C#compaction#361 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:17,205 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/0c53b98d8d914e32be49dbc6a97a3c45 is 50, key is test_row_0/C:col10/1732101674794/Put/seqid=0 2024-11-20T11:21:17,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/20c92f2b53c949edba4ef42dd367b941 is 50, key is test_row_0/C:col10/1732101675934/Put/seqid=0 2024-11-20T11:21:17,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742255_1431 (size=12493) 2024-11-20T11:21:17,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742256_1432 (size=12151) 2024-11-20T11:21:17,222 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/20c92f2b53c949edba4ef42dd367b941 2024-11-20T11:21:17,225 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/0c53b98d8d914e32be49dbc6a97a3c45 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/0c53b98d8d914e32be49dbc6a97a3c45 2024-11-20T11:21:17,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/d32c57774ea742bb9124e086200e71fc as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/d32c57774ea742bb9124e086200e71fc 2024-11-20T11:21:17,231 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2bb1124baa057df845a5f13f3b500be1/C of 2bb1124baa057df845a5f13f3b500be1 into 0c53b98d8d914e32be49dbc6a97a3c45(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:21:17,231 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:17,231 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1., storeName=2bb1124baa057df845a5f13f3b500be1/C, priority=12, startTime=1732101676758; duration=0sec 2024-11-20T11:21:17,231 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:17,231 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2bb1124baa057df845a5f13f3b500be1:C 2024-11-20T11:21:17,233 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/d32c57774ea742bb9124e086200e71fc, entries=150, sequenceid=169, filesize=11.9 K 2024-11-20T11:21:17,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/52d3674158764b2da6c74645f43b45aa as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/52d3674158764b2da6c74645f43b45aa 2024-11-20T11:21:17,236 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/52d3674158764b2da6c74645f43b45aa, entries=150, sequenceid=169, filesize=11.9 K 2024-11-20T11:21:17,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/20c92f2b53c949edba4ef42dd367b941 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/20c92f2b53c949edba4ef42dd367b941 2024-11-20T11:21:17,240 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/20c92f2b53c949edba4ef42dd367b941, entries=150, sequenceid=169, filesize=11.9 K 2024-11-20T11:21:17,241 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for 2bb1124baa057df845a5f13f3b500be1 in 461ms, sequenceid=169, compaction requested=false 2024-11-20T11:21:17,241 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:17,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:17,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-20T11:21:17,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-11-20T11:21:17,243 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-11-20T11:21:17,243 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 922 msec 2024-11-20T11:21:17,244 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 925 msec 2024-11-20T11:21:17,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:17,393 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2bb1124baa057df845a5f13f3b500be1 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T11:21:17,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=A 2024-11-20T11:21:17,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:17,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=B 2024-11-20T11:21:17,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:17,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=C 2024-11-20T11:21:17,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:17,397 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/65e275ff722a42b6ad6744c0363139fc is 50, key is test_row_0/A:col10/1732101677081/Put/seqid=0 2024-11-20T11:21:17,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742257_1433 (size=14541) 2024-11-20T11:21:17,408 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:17,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101737403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:17,408 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:17,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101737405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:17,409 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:17,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101737406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:17,412 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:17,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101737408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:17,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:17,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101737408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:17,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T11:21:17,422 INFO [Thread-1798 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-11-20T11:21:17,423 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:21:17,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-11-20T11:21:17,424 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:21:17,425 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:21:17,425 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:21:17,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T11:21:17,512 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:17,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101737509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:17,512 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:17,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101737509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:17,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:17,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101737510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:17,516 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:17,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101737513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:17,516 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:17,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101737513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:17,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T11:21:17,576 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:17,576 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T11:21:17,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:17,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:17,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:17,577 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:17,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:17,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:17,716 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:17,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101737713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:17,716 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:17,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101737713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:17,717 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:17,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101737714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:17,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:17,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101737717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:17,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:17,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101737718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:17,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T11:21:17,728 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:17,729 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T11:21:17,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:17,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:17,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:17,729 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:17,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:17,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:17,801 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/65e275ff722a42b6ad6744c0363139fc 2024-11-20T11:21:17,808 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/8480ddd6d62645a691784ab3585deab7 is 50, key is test_row_0/B:col10/1732101677081/Put/seqid=0 2024-11-20T11:21:17,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742258_1434 (size=12151) 2024-11-20T11:21:17,881 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:17,881 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T11:21:17,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:17,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
as already flushing 2024-11-20T11:21:17,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:17,882 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:17,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:17,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:18,020 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:18,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101738018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:18,021 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:18,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101738018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:18,021 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:18,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101738019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:18,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:18,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101738024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:18,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:18,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101738024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:18,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T11:21:18,034 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:18,034 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T11:21:18,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:18,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:18,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:18,034 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:18,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:18,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:18,186 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:18,186 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T11:21:18,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:18,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:18,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:18,187 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:18,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:18,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:18,213 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/8480ddd6d62645a691784ab3585deab7 2024-11-20T11:21:18,218 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/1acf9291aab94307a3379a35adecc733 is 50, key is test_row_0/C:col10/1732101677081/Put/seqid=0 2024-11-20T11:21:18,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742259_1435 (size=12151) 2024-11-20T11:21:18,338 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:18,339 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T11:21:18,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:18,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
as already flushing 2024-11-20T11:21:18,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:18,339 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:18,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:18,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:18,491 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:18,492 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T11:21:18,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:18,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:18,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:18,492 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:18,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:18,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:18,527 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:18,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101738524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:18,528 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:18,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101738525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:18,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T11:21:18,529 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:18,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101738526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:18,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:18,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101738528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:18,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:18,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101738531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:18,622 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/1acf9291aab94307a3379a35adecc733 2024-11-20T11:21:18,626 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/65e275ff722a42b6ad6744c0363139fc as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/65e275ff722a42b6ad6744c0363139fc 2024-11-20T11:21:18,630 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/65e275ff722a42b6ad6744c0363139fc, entries=200, sequenceid=193, filesize=14.2 K 2024-11-20T11:21:18,630 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/8480ddd6d62645a691784ab3585deab7 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/8480ddd6d62645a691784ab3585deab7 2024-11-20T11:21:18,633 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/8480ddd6d62645a691784ab3585deab7, entries=150, sequenceid=193, filesize=11.9 K 2024-11-20T11:21:18,634 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/1acf9291aab94307a3379a35adecc733 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/1acf9291aab94307a3379a35adecc733 2024-11-20T11:21:18,637 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/1acf9291aab94307a3379a35adecc733, entries=150, sequenceid=193, filesize=11.9 K 2024-11-20T11:21:18,638 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 2bb1124baa057df845a5f13f3b500be1 in 1245ms, sequenceid=193, compaction requested=true 2024-11-20T11:21:18,638 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:18,638 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:18,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2bb1124baa057df845a5f13f3b500be1:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:21:18,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:18,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2bb1124baa057df845a5f13f3b500be1:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:21:18,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:18,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2bb1124baa057df845a5f13f3b500be1:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:21:18,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:18,638 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:18,639 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39185 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:18,639 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 2bb1124baa057df845a5f13f3b500be1/A is initiating minor 
compaction (all files) 2024-11-20T11:21:18,639 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:18,639 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2bb1124baa057df845a5f13f3b500be1/A in TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:18,639 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 2bb1124baa057df845a5f13f3b500be1/B is initiating minor compaction (all files) 2024-11-20T11:21:18,639 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2bb1124baa057df845a5f13f3b500be1/B in TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:18,639 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/76a356fac7db4c759e63d3d664aa076f, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/d32c57774ea742bb9124e086200e71fc, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/65e275ff722a42b6ad6744c0363139fc] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp, totalSize=38.3 K 2024-11-20T11:21:18,639 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/bc2526279aad44f099bbf74539554994, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/52d3674158764b2da6c74645f43b45aa, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/8480ddd6d62645a691784ab3585deab7] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp, totalSize=35.9 K 2024-11-20T11:21:18,639 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 76a356fac7db4c759e63d3d664aa076f, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732101674783 2024-11-20T11:21:18,639 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc2526279aad44f099bbf74539554994, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732101674783 2024-11-20T11:21:18,639 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52d3674158764b2da6c74645f43b45aa, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732101675934 2024-11-20T11:21:18,639 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] 
compactions.Compactor(224): Compacting d32c57774ea742bb9124e086200e71fc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732101675934 2024-11-20T11:21:18,640 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 65e275ff722a42b6ad6744c0363139fc, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732101677074 2024-11-20T11:21:18,640 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8480ddd6d62645a691784ab3585deab7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732101677074 2024-11-20T11:21:18,644 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:18,645 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T11:21:18,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:18,645 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing 2bb1124baa057df845a5f13f3b500be1 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T11:21:18,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=A 2024-11-20T11:21:18,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:18,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=B 2024-11-20T11:21:18,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:18,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=C 2024-11-20T11:21:18,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:18,646 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2bb1124baa057df845a5f13f3b500be1#B#compaction#366 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:18,646 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/1dd5ed1720a94850a03ef20a3a3bf3c1 is 50, key is test_row_0/B:col10/1732101677081/Put/seqid=0 2024-11-20T11:21:18,648 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2bb1124baa057df845a5f13f3b500be1#A#compaction#367 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:18,648 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/25eb82e030504293af05423dd41df005 is 50, key is test_row_0/A:col10/1732101677081/Put/seqid=0 2024-11-20T11:21:18,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/5be8548a55664fa39d5b612ea612bdfb is 50, key is test_row_0/A:col10/1732101677407/Put/seqid=0 2024-11-20T11:21:18,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742261_1437 (size=12595) 2024-11-20T11:21:18,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742260_1436 (size=12595) 2024-11-20T11:21:18,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742262_1438 (size=12151) 2024-11-20T11:21:19,068 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/1dd5ed1720a94850a03ef20a3a3bf3c1 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/1dd5ed1720a94850a03ef20a3a3bf3c1 2024-11-20T11:21:19,068 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/25eb82e030504293af05423dd41df005 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/25eb82e030504293af05423dd41df005 2024-11-20T11:21:19,070 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/5be8548a55664fa39d5b612ea612bdfb 2024-11-20T11:21:19,076 INFO 
[RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2bb1124baa057df845a5f13f3b500be1/A of 2bb1124baa057df845a5f13f3b500be1 into 25eb82e030504293af05423dd41df005(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:21:19,076 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:19,076 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1., storeName=2bb1124baa057df845a5f13f3b500be1/A, priority=13, startTime=1732101678638; duration=0sec 2024-11-20T11:21:19,076 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:19,076 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2bb1124baa057df845a5f13f3b500be1/B of 2bb1124baa057df845a5f13f3b500be1 into 1dd5ed1720a94850a03ef20a3a3bf3c1(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:21:19,076 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2bb1124baa057df845a5f13f3b500be1:A 2024-11-20T11:21:19,076 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:19,076 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1., storeName=2bb1124baa057df845a5f13f3b500be1/B, priority=13, startTime=1732101678638; duration=0sec 2024-11-20T11:21:19,076 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:19,076 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:19,076 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2bb1124baa057df845a5f13f3b500be1:B 2024-11-20T11:21:19,078 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:19,078 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 2bb1124baa057df845a5f13f3b500be1/C is initiating minor compaction (all files) 2024-11-20T11:21:19,078 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2bb1124baa057df845a5f13f3b500be1/C in TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
2024-11-20T11:21:19,078 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/0c53b98d8d914e32be49dbc6a97a3c45, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/20c92f2b53c949edba4ef42dd367b941, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/1acf9291aab94307a3379a35adecc733] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp, totalSize=35.9 K 2024-11-20T11:21:19,078 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c53b98d8d914e32be49dbc6a97a3c45, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732101674783 2024-11-20T11:21:19,079 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 20c92f2b53c949edba4ef42dd367b941, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732101675934 2024-11-20T11:21:19,079 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 1acf9291aab94307a3379a35adecc733, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732101677074 2024-11-20T11:21:19,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/00fec8292328434188b9978fef6cc1ae is 50, key is test_row_0/B:col10/1732101677407/Put/seqid=0 2024-11-20T11:21:19,085 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2bb1124baa057df845a5f13f3b500be1#C#compaction#370 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:19,086 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/10b3e3bbaa864511b84419e34bd468f4 is 50, key is test_row_0/C:col10/1732101677081/Put/seqid=0 2024-11-20T11:21:19,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742263_1439 (size=12595) 2024-11-20T11:21:19,099 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/10b3e3bbaa864511b84419e34bd468f4 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/10b3e3bbaa864511b84419e34bd468f4 2024-11-20T11:21:19,104 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2bb1124baa057df845a5f13f3b500be1/C of 2bb1124baa057df845a5f13f3b500be1 into 10b3e3bbaa864511b84419e34bd468f4(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:21:19,104 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:19,104 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1., storeName=2bb1124baa057df845a5f13f3b500be1/C, priority=13, startTime=1732101678638; duration=0sec 2024-11-20T11:21:19,104 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:19,104 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2bb1124baa057df845a5f13f3b500be1:C 2024-11-20T11:21:19,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742264_1440 (size=12151) 2024-11-20T11:21:19,514 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/00fec8292328434188b9978fef6cc1ae 2024-11-20T11:21:19,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/2d768d51295a4980b2696ebf57271253 is 50, key is test_row_0/C:col10/1732101677407/Put/seqid=0 2024-11-20T11:21:19,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to 
blk_1073742265_1441 (size=12151) 2024-11-20T11:21:19,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T11:21:19,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:19,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:19,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:19,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101739552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:19,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:19,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101739554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:19,558 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:19,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101739554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:19,561 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:19,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101739555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:19,561 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:19,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101739555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:19,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:19,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101739658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:19,660 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:19,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101739658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:19,662 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:19,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101739659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:19,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:19,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101739662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:19,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:19,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101739662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:19,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:19,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101739861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:19,866 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:19,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101739862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:19,867 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:19,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101739863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:19,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:19,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101739865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:19,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:19,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101739866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:19,925 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/2d768d51295a4980b2696ebf57271253 2024-11-20T11:21:19,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/5be8548a55664fa39d5b612ea612bdfb as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/5be8548a55664fa39d5b612ea612bdfb 2024-11-20T11:21:19,931 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/5be8548a55664fa39d5b612ea612bdfb, entries=150, sequenceid=208, filesize=11.9 K 2024-11-20T11:21:19,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/00fec8292328434188b9978fef6cc1ae as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/00fec8292328434188b9978fef6cc1ae 2024-11-20T11:21:19,935 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/00fec8292328434188b9978fef6cc1ae, entries=150, sequenceid=208, filesize=11.9 K 2024-11-20T11:21:19,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/2d768d51295a4980b2696ebf57271253 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/2d768d51295a4980b2696ebf57271253 2024-11-20T11:21:19,939 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/2d768d51295a4980b2696ebf57271253, entries=150, sequenceid=208, filesize=11.9 K 2024-11-20T11:21:19,940 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 2bb1124baa057df845a5f13f3b500be1 in 1294ms, sequenceid=208, compaction requested=false 2024-11-20T11:21:19,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:19,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:19,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-11-20T11:21:19,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-11-20T11:21:19,942 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-11-20T11:21:19,942 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5160 sec 2024-11-20T11:21:19,943 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 2.5190 sec 2024-11-20T11:21:20,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:20,169 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2bb1124baa057df845a5f13f3b500be1 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T11:21:20,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=A 2024-11-20T11:21:20,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:20,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=B 2024-11-20T11:21:20,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:20,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
2bb1124baa057df845a5f13f3b500be1, store=C 2024-11-20T11:21:20,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:20,174 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/dfe89eb6df6c476c84c96131c679a9f5 is 50, key is test_row_0/A:col10/1732101679554/Put/seqid=0 2024-11-20T11:21:20,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742266_1442 (size=14541) 2024-11-20T11:21:20,184 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:20,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101740179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:20,185 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:20,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101740180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:20,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:20,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101740183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:20,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:20,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101740184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:20,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:20,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101740185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:20,287 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:20,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101740286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:20,287 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:20,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101740286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:20,289 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:20,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101740287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:20,295 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:20,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101740292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:20,295 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:20,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101740292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:20,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:20,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101740489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:20,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:20,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101740489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:20,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:20,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101740491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:20,501 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:20,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101740496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:20,501 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:20,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101740496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:20,578 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/dfe89eb6df6c476c84c96131c679a9f5 2024-11-20T11:21:20,585 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/3ae5d44186b1438cb989e0854223039a is 50, key is test_row_0/B:col10/1732101679554/Put/seqid=0 2024-11-20T11:21:20,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742267_1443 (size=12151) 2024-11-20T11:21:20,798 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:20,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101740796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:20,798 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:20,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101740796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:20,800 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:20,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101740797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:20,806 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:20,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101740804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:20,806 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:20,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101740804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:20,990 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/3ae5d44186b1438cb989e0854223039a 2024-11-20T11:21:20,996 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/fda0cbfe14f84be8b44d7912a5aa14d5 is 50, key is test_row_0/C:col10/1732101679554/Put/seqid=0 2024-11-20T11:21:21,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742268_1444 (size=12151) 2024-11-20T11:21:21,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:21,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101741300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:21,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:21,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101741301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:21,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:21,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101741301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:21,311 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:21,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101741308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:21,311 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:21,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101741310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:21,401 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/fda0cbfe14f84be8b44d7912a5aa14d5 2024-11-20T11:21:21,405 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/dfe89eb6df6c476c84c96131c679a9f5 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/dfe89eb6df6c476c84c96131c679a9f5 2024-11-20T11:21:21,409 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/dfe89eb6df6c476c84c96131c679a9f5, entries=200, sequenceid=234, filesize=14.2 K 2024-11-20T11:21:21,410 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/3ae5d44186b1438cb989e0854223039a as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/3ae5d44186b1438cb989e0854223039a 2024-11-20T11:21:21,413 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/3ae5d44186b1438cb989e0854223039a, entries=150, sequenceid=234, filesize=11.9 K 2024-11-20T11:21:21,414 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/fda0cbfe14f84be8b44d7912a5aa14d5 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/fda0cbfe14f84be8b44d7912a5aa14d5 2024-11-20T11:21:21,417 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/fda0cbfe14f84be8b44d7912a5aa14d5, entries=150, sequenceid=234, filesize=11.9 K 2024-11-20T11:21:21,418 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 2bb1124baa057df845a5f13f3b500be1 in 1249ms, sequenceid=234, compaction requested=true 2024-11-20T11:21:21,418 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:21,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2bb1124baa057df845a5f13f3b500be1:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:21:21,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:21,418 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:21,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2bb1124baa057df845a5f13f3b500be1:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:21:21,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:21,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2bb1124baa057df845a5f13f3b500be1:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:21:21,418 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:21,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:21,419 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39287 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:21,419 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 2bb1124baa057df845a5f13f3b500be1/A is initiating minor 
compaction (all files) 2024-11-20T11:21:21,419 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2bb1124baa057df845a5f13f3b500be1/A in TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:21,419 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/25eb82e030504293af05423dd41df005, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/5be8548a55664fa39d5b612ea612bdfb, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/dfe89eb6df6c476c84c96131c679a9f5] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp, totalSize=38.4 K 2024-11-20T11:21:21,419 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:21,420 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 2bb1124baa057df845a5f13f3b500be1/B is initiating minor compaction (all files) 2024-11-20T11:21:21,420 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2bb1124baa057df845a5f13f3b500be1/B in TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
2024-11-20T11:21:21,420 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 25eb82e030504293af05423dd41df005, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732101677074 2024-11-20T11:21:21,420 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/1dd5ed1720a94850a03ef20a3a3bf3c1, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/00fec8292328434188b9978fef6cc1ae, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/3ae5d44186b1438cb989e0854223039a] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp, totalSize=36.0 K 2024-11-20T11:21:21,420 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 1dd5ed1720a94850a03ef20a3a3bf3c1, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732101677074 2024-11-20T11:21:21,420 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5be8548a55664fa39d5b612ea612bdfb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1732101677405 2024-11-20T11:21:21,420 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 00fec8292328434188b9978fef6cc1ae, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1732101677405 2024-11-20T11:21:21,420 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting dfe89eb6df6c476c84c96131c679a9f5, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732101679553 2024-11-20T11:21:21,421 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ae5d44186b1438cb989e0854223039a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732101679553 2024-11-20T11:21:21,426 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2bb1124baa057df845a5f13f3b500be1#B#compaction#375 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:21,427 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/2d5b2d68dcfd419b8e8868b0594f042d is 50, key is test_row_0/B:col10/1732101679554/Put/seqid=0 2024-11-20T11:21:21,427 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2bb1124baa057df845a5f13f3b500be1#A#compaction#376 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:21,428 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/3bfd3034b1674611816b0f29ab906ddd is 50, key is test_row_0/A:col10/1732101679554/Put/seqid=0 2024-11-20T11:21:21,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742270_1446 (size=12697) 2024-11-20T11:21:21,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742269_1445 (size=12697) 2024-11-20T11:21:21,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T11:21:21,530 INFO [Thread-1798 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-11-20T11:21:21,531 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:21:21,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees 2024-11-20T11:21:21,532 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:21:21,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T11:21:21,533 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:21:21,533 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:21:21,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T11:21:21,685 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:21,685 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-20T11:21:21,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
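For context on the pid=142/144/145 entries above ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees", FlushTableProcedure, FlushRegionProcedure): these record client-requested flushes of the TestAcidGuarantees table. Below is a minimal, hedged sketch of issuing the same request through the standard HBase 2.x Java Admin API; only the table name is taken from the log, and connection setup (an hbase-site.xml on the classpath) is assumed.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    // Uses whatever hbase-site.xml is on the classpath; the table name is the one
    // that appears in the log above.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the cluster to flush every region of the table. In builds like the one
      // producing this log, the request runs on the master as a FlushTableProcedure
      // with one FlushRegionProcedure per region (the pid=144/145 entries above).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```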
2024-11-20T11:21:21,685 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing 2bb1124baa057df845a5f13f3b500be1 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T11:21:21,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=A 2024-11-20T11:21:21,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:21,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=B 2024-11-20T11:21:21,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:21,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=C 2024-11-20T11:21:21,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:21,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/192c27bbda9c4f55b4d6d6bbba2c997f is 50, key is test_row_0/A:col10/1732101680184/Put/seqid=0 2024-11-20T11:21:21,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742271_1447 (size=12151) 2024-11-20T11:21:21,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T11:21:21,842 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/3bfd3034b1674611816b0f29ab906ddd as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/3bfd3034b1674611816b0f29ab906ddd 2024-11-20T11:21:21,843 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/2d5b2d68dcfd419b8e8868b0594f042d as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/2d5b2d68dcfd419b8e8868b0594f042d 2024-11-20T11:21:21,848 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2bb1124baa057df845a5f13f3b500be1/A of 2bb1124baa057df845a5f13f3b500be1 into 
3bfd3034b1674611816b0f29ab906ddd(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:21:21,848 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2bb1124baa057df845a5f13f3b500be1/B of 2bb1124baa057df845a5f13f3b500be1 into 2d5b2d68dcfd419b8e8868b0594f042d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:21:21,848 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:21,848 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:21,848 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1., storeName=2bb1124baa057df845a5f13f3b500be1/A, priority=13, startTime=1732101681418; duration=0sec 2024-11-20T11:21:21,848 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1., storeName=2bb1124baa057df845a5f13f3b500be1/B, priority=13, startTime=1732101681418; duration=0sec 2024-11-20T11:21:21,848 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:21,848 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2bb1124baa057df845a5f13f3b500be1:A 2024-11-20T11:21:21,848 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:21,848 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2bb1124baa057df845a5f13f3b500be1:B 2024-11-20T11:21:21,848 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:21,849 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:21,849 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 2bb1124baa057df845a5f13f3b500be1/C is initiating minor compaction (all files) 2024-11-20T11:21:21,849 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2bb1124baa057df845a5f13f3b500be1/C in TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
2024-11-20T11:21:21,849 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/10b3e3bbaa864511b84419e34bd468f4, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/2d768d51295a4980b2696ebf57271253, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/fda0cbfe14f84be8b44d7912a5aa14d5] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp, totalSize=36.0 K 2024-11-20T11:21:21,850 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 10b3e3bbaa864511b84419e34bd468f4, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732101677074 2024-11-20T11:21:21,850 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d768d51295a4980b2696ebf57271253, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1732101677405 2024-11-20T11:21:21,851 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting fda0cbfe14f84be8b44d7912a5aa14d5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732101679553 2024-11-20T11:21:21,857 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2bb1124baa057df845a5f13f3b500be1#C#compaction#378 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:21,857 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/ef5d0d791b4f4d89aa2b32b0a3bcfd12 is 50, key is test_row_0/C:col10/1732101679554/Put/seqid=0 2024-11-20T11:21:21,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742272_1448 (size=12697) 2024-11-20T11:21:22,094 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/192c27bbda9c4f55b4d6d6bbba2c997f 2024-11-20T11:21:22,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/179d181f820a46708d8c6ec290b8df66 is 50, key is test_row_0/B:col10/1732101680184/Put/seqid=0 2024-11-20T11:21:22,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742273_1449 (size=12151) 2024-11-20T11:21:22,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T11:21:22,274 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/ef5d0d791b4f4d89aa2b32b0a3bcfd12 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/ef5d0d791b4f4d89aa2b32b0a3bcfd12 2024-11-20T11:21:22,278 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2bb1124baa057df845a5f13f3b500be1/C of 2bb1124baa057df845a5f13f3b500be1 into ef5d0d791b4f4d89aa2b32b0a3bcfd12(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
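The compaction entries above show system compactions queued by the flusher: for each store (A, B, C) the exploring policy selected the three flushed HFiles (roughly 36-38 K per store) and rewrote them into a single ~12.4 K file. As a hedged illustration of driving the same mechanism from a client, the sketch below requests a compaction with the standard 2.x Admin API and polls until the server reports it finished; the table name comes from the log, while the polling interval is purely illustrative.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Request a (minor) compaction of the table; the server-side policy decides
      // which store files to merge, as in the "Add compact mark" entries above.
      admin.compact(table);

      // Poll until the region server reports no compaction is running for the table.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(500); // illustrative interval, not a tuned value
      }
    }
  }
}
```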
2024-11-20T11:21:22,278 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:22,278 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1., storeName=2bb1124baa057df845a5f13f3b500be1/C, priority=13, startTime=1732101681418; duration=0sec 2024-11-20T11:21:22,279 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:22,279 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2bb1124baa057df845a5f13f3b500be1:C 2024-11-20T11:21:22,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:22,308 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:22,335 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:22,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101742330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:22,335 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:22,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101742331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:22,336 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:22,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101742332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:22,339 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:22,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101742335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:22,342 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:22,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101742336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:22,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:22,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101742436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:22,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:22,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101742436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:22,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:22,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101742436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:22,442 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:22,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101742440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:22,445 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:22,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101742443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:22,511 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/179d181f820a46708d8c6ec290b8df66 2024-11-20T11:21:22,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/35967c929efb4c64a80c9d4de1c196ea is 50, key is test_row_0/C:col10/1732101680184/Put/seqid=0 2024-11-20T11:21:22,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742274_1450 (size=12151) 2024-11-20T11:21:22,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T11:21:22,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:22,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101742639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:22,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:22,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101742639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:22,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:22,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101742640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:22,646 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:22,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101742643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:22,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:22,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101742647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:22,921 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/35967c929efb4c64a80c9d4de1c196ea 2024-11-20T11:21:22,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/192c27bbda9c4f55b4d6d6bbba2c997f as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/192c27bbda9c4f55b4d6d6bbba2c997f 2024-11-20T11:21:22,929 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/192c27bbda9c4f55b4d6d6bbba2c997f, entries=150, sequenceid=248, filesize=11.9 K 2024-11-20T11:21:22,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/179d181f820a46708d8c6ec290b8df66 as 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/179d181f820a46708d8c6ec290b8df66 2024-11-20T11:21:22,933 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/179d181f820a46708d8c6ec290b8df66, entries=150, sequenceid=248, filesize=11.9 K 2024-11-20T11:21:22,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/35967c929efb4c64a80c9d4de1c196ea as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/35967c929efb4c64a80c9d4de1c196ea 2024-11-20T11:21:22,938 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/35967c929efb4c64a80c9d4de1c196ea, entries=150, sequenceid=248, filesize=11.9 K 2024-11-20T11:21:22,939 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 2bb1124baa057df845a5f13f3b500be1 in 1254ms, sequenceid=248, compaction requested=false 2024-11-20T11:21:22,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:22,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
2024-11-20T11:21:22,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-11-20T11:21:22,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-11-20T11:21:22,941 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-11-20T11:21:22,941 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4070 sec 2024-11-20T11:21:22,943 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees in 1.4110 sec 2024-11-20T11:21:22,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:22,945 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2bb1124baa057df845a5f13f3b500be1 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T11:21:22,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=A 2024-11-20T11:21:22,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:22,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=B 2024-11-20T11:21:22,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:22,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=C 2024-11-20T11:21:22,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:22,950 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/a21b3fa0d4874c3dbacce9160cdfe9e0 is 50, key is test_row_0/A:col10/1732101682944/Put/seqid=0 2024-11-20T11:21:22,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742275_1451 (size=12301) 2024-11-20T11:21:22,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:22,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:22,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101742954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:22,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101742955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:22,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:22,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101742959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:22,967 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:22,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101742960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:22,967 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:22,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101742960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:23,062 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:23,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101743061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:23,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:23,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101743061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:23,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:23,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101743064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:23,070 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:23,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101743068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:23,070 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:23,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101743068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:23,266 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:23,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101743264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:23,266 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:23,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101743264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:23,273 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:23,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101743269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:23,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:23,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101743271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:23,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:23,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101743272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:23,355 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/a21b3fa0d4874c3dbacce9160cdfe9e0 2024-11-20T11:21:23,362 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/3efc83ccba374c42a566c0901bd146aa is 50, key is test_row_0/B:col10/1732101682944/Put/seqid=0 2024-11-20T11:21:23,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742276_1452 (size=12301) 2024-11-20T11:21:23,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:23,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101743567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:23,570 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:23,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101743569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:23,577 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:23,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101743575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:23,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:23,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101743576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:23,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:23,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101743577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:23,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T11:21:23,636 INFO [Thread-1798 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-11-20T11:21:23,637 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:21:23,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees 2024-11-20T11:21:23,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-20T11:21:23,639 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:21:23,639 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:21:23,640 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:21:23,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see 
if procedure is done pid=146 2024-11-20T11:21:23,769 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/3efc83ccba374c42a566c0901bd146aa 2024-11-20T11:21:23,775 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/c699318b988141088fa31a7b9cc6e7fd is 50, key is test_row_0/C:col10/1732101682944/Put/seqid=0 2024-11-20T11:21:23,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742277_1453 (size=12301) 2024-11-20T11:21:23,791 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:23,791 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-20T11:21:23,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:23,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:23,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:23,792 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:23,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:23,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:23,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-20T11:21:23,944 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:23,944 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-20T11:21:23,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:23,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:23,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:23,944 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:23,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:23,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:24,076 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:24,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101744072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:24,079 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:24,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101744076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:24,084 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:24,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101744082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:24,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:24,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101744082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:24,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:24,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101744083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:24,096 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:24,097 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-20T11:21:24,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:24,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:24,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:24,097 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:24,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:24,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:24,182 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/c699318b988141088fa31a7b9cc6e7fd 2024-11-20T11:21:24,186 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/a21b3fa0d4874c3dbacce9160cdfe9e0 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/a21b3fa0d4874c3dbacce9160cdfe9e0 2024-11-20T11:21:24,190 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/a21b3fa0d4874c3dbacce9160cdfe9e0, entries=150, sequenceid=277, filesize=12.0 K 2024-11-20T11:21:24,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/3efc83ccba374c42a566c0901bd146aa as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/3efc83ccba374c42a566c0901bd146aa 2024-11-20T11:21:24,194 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/3efc83ccba374c42a566c0901bd146aa, entries=150, sequenceid=277, filesize=12.0 K 2024-11-20T11:21:24,194 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/c699318b988141088fa31a7b9cc6e7fd as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/c699318b988141088fa31a7b9cc6e7fd 2024-11-20T11:21:24,199 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/c699318b988141088fa31a7b9cc6e7fd, entries=150, sequenceid=277, filesize=12.0 K 2024-11-20T11:21:24,200 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 2bb1124baa057df845a5f13f3b500be1 in 1255ms, sequenceid=277, compaction requested=true 2024-11-20T11:21:24,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:24,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
2bb1124baa057df845a5f13f3b500be1:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:21:24,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:24,200 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:24,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2bb1124baa057df845a5f13f3b500be1:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:21:24,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:24,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2bb1124baa057df845a5f13f3b500be1:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:21:24,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:24,200 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:24,201 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:24,201 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:24,201 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 2bb1124baa057df845a5f13f3b500be1/B is initiating minor compaction (all files) 2024-11-20T11:21:24,201 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 2bb1124baa057df845a5f13f3b500be1/A is initiating minor compaction (all files) 2024-11-20T11:21:24,201 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2bb1124baa057df845a5f13f3b500be1/A in TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:24,201 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2bb1124baa057df845a5f13f3b500be1/B in TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
2024-11-20T11:21:24,201 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/3bfd3034b1674611816b0f29ab906ddd, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/192c27bbda9c4f55b4d6d6bbba2c997f, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/a21b3fa0d4874c3dbacce9160cdfe9e0] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp, totalSize=36.3 K 2024-11-20T11:21:24,201 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/2d5b2d68dcfd419b8e8868b0594f042d, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/179d181f820a46708d8c6ec290b8df66, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/3efc83ccba374c42a566c0901bd146aa] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp, totalSize=36.3 K 2024-11-20T11:21:24,202 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3bfd3034b1674611816b0f29ab906ddd, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732101679553 2024-11-20T11:21:24,202 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d5b2d68dcfd419b8e8868b0594f042d, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732101679553 2024-11-20T11:21:24,202 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 192c27bbda9c4f55b4d6d6bbba2c997f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732101680173 2024-11-20T11:21:24,202 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 179d181f820a46708d8c6ec290b8df66, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732101680173 2024-11-20T11:21:24,202 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting a21b3fa0d4874c3dbacce9160cdfe9e0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732101682944 2024-11-20T11:21:24,202 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 3efc83ccba374c42a566c0901bd146aa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732101682944 2024-11-20T11:21:24,209 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2bb1124baa057df845a5f13f3b500be1#A#compaction#384 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:24,209 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/87330a8d017c4ac09b1f636c7b66e552 is 50, key is test_row_0/A:col10/1732101682944/Put/seqid=0 2024-11-20T11:21:24,209 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2bb1124baa057df845a5f13f3b500be1#B#compaction#385 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:24,210 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/82ea8183cb9e4bb58328ec1d00993329 is 50, key is test_row_0/B:col10/1732101682944/Put/seqid=0 2024-11-20T11:21:24,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742278_1454 (size=12949) 2024-11-20T11:21:24,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742279_1455 (size=12949) 2024-11-20T11:21:24,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-20T11:21:24,249 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:24,250 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-20T11:21:24,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
2024-11-20T11:21:24,250 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2837): Flushing 2bb1124baa057df845a5f13f3b500be1 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T11:21:24,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=A 2024-11-20T11:21:24,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:24,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=B 2024-11-20T11:21:24,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:24,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=C 2024-11-20T11:21:24,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:24,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/278a72487a4e4e0897800183ba31f8a6 is 50, key is test_row_0/A:col10/1732101682959/Put/seqid=0 2024-11-20T11:21:24,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742280_1456 (size=12301) 2024-11-20T11:21:24,619 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/82ea8183cb9e4bb58328ec1d00993329 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/82ea8183cb9e4bb58328ec1d00993329 2024-11-20T11:21:24,619 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/87330a8d017c4ac09b1f636c7b66e552 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/87330a8d017c4ac09b1f636c7b66e552 2024-11-20T11:21:24,623 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2bb1124baa057df845a5f13f3b500be1/B of 2bb1124baa057df845a5f13f3b500be1 into 82ea8183cb9e4bb58328ec1d00993329(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:21:24,623 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2bb1124baa057df845a5f13f3b500be1/A of 2bb1124baa057df845a5f13f3b500be1 into 87330a8d017c4ac09b1f636c7b66e552(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:21:24,623 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:24,623 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:24,623 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1., storeName=2bb1124baa057df845a5f13f3b500be1/B, priority=13, startTime=1732101684200; duration=0sec 2024-11-20T11:21:24,623 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1., storeName=2bb1124baa057df845a5f13f3b500be1/A, priority=13, startTime=1732101684200; duration=0sec 2024-11-20T11:21:24,623 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:24,623 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:24,623 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2bb1124baa057df845a5f13f3b500be1:A 2024-11-20T11:21:24,623 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2bb1124baa057df845a5f13f3b500be1:B 2024-11-20T11:21:24,623 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:24,624 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:24,624 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 2bb1124baa057df845a5f13f3b500be1/C is initiating minor compaction (all files) 2024-11-20T11:21:24,624 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2bb1124baa057df845a5f13f3b500be1/C in TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
2024-11-20T11:21:24,624 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/ef5d0d791b4f4d89aa2b32b0a3bcfd12, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/35967c929efb4c64a80c9d4de1c196ea, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/c699318b988141088fa31a7b9cc6e7fd] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp, totalSize=36.3 K 2024-11-20T11:21:24,625 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef5d0d791b4f4d89aa2b32b0a3bcfd12, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732101679553 2024-11-20T11:21:24,625 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 35967c929efb4c64a80c9d4de1c196ea, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732101680173 2024-11-20T11:21:24,625 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting c699318b988141088fa31a7b9cc6e7fd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732101682944 2024-11-20T11:21:24,631 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2bb1124baa057df845a5f13f3b500be1#C#compaction#387 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:24,631 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/92f2f979abef49d6a43bed42b86ddd60 is 50, key is test_row_0/C:col10/1732101682944/Put/seqid=0 2024-11-20T11:21:24,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742281_1457 (size=12949) 2024-11-20T11:21:24,659 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/278a72487a4e4e0897800183ba31f8a6 2024-11-20T11:21:24,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/e504d4e4289249fbbaf568465ebd3301 is 50, key is test_row_0/B:col10/1732101682959/Put/seqid=0 2024-11-20T11:21:24,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742282_1458 (size=12301) 2024-11-20T11:21:24,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-20T11:21:25,040 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/92f2f979abef49d6a43bed42b86ddd60 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/92f2f979abef49d6a43bed42b86ddd60 2024-11-20T11:21:25,045 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2bb1124baa057df845a5f13f3b500be1/C of 2bb1124baa057df845a5f13f3b500be1 into 92f2f979abef49d6a43bed42b86ddd60(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:21:25,045 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:25,045 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1., storeName=2bb1124baa057df845a5f13f3b500be1/C, priority=13, startTime=1732101684200; duration=0sec 2024-11-20T11:21:25,045 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:25,045 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2bb1124baa057df845a5f13f3b500be1:C 2024-11-20T11:21:25,071 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/e504d4e4289249fbbaf568465ebd3301 2024-11-20T11:21:25,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/89ee865b03f84d16963773fb03c2fc6b is 50, key is test_row_0/C:col10/1732101682959/Put/seqid=0 2024-11-20T11:21:25,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742283_1459 (size=12301) 2024-11-20T11:21:25,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:25,084 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:25,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:25,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101745109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:25,117 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:25,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101745110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:25,117 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:25,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101745111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:25,119 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:25,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101745113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:25,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:25,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101745115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:25,218 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:25,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101745216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:25,220 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:25,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101745217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:25,220 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:25,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101745218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:25,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:25,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101745220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:25,224 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:25,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101745222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:25,422 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:25,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101745420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:25,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:25,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101745422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:25,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:25,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101745422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:25,426 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:25,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101745424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:25,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:25,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101745426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:25,483 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/89ee865b03f84d16963773fb03c2fc6b 2024-11-20T11:21:25,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/278a72487a4e4e0897800183ba31f8a6 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/278a72487a4e4e0897800183ba31f8a6 2024-11-20T11:21:25,490 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/278a72487a4e4e0897800183ba31f8a6, entries=150, sequenceid=288, filesize=12.0 K 2024-11-20T11:21:25,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/e504d4e4289249fbbaf568465ebd3301 as 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/e504d4e4289249fbbaf568465ebd3301 2024-11-20T11:21:25,494 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/e504d4e4289249fbbaf568465ebd3301, entries=150, sequenceid=288, filesize=12.0 K 2024-11-20T11:21:25,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/89ee865b03f84d16963773fb03c2fc6b as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/89ee865b03f84d16963773fb03c2fc6b 2024-11-20T11:21:25,498 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/89ee865b03f84d16963773fb03c2fc6b, entries=150, sequenceid=288, filesize=12.0 K 2024-11-20T11:21:25,499 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 2bb1124baa057df845a5f13f3b500be1 in 1249ms, sequenceid=288, compaction requested=false 2024-11-20T11:21:25,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2538): Flush status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:25,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
2024-11-20T11:21:25,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=147 2024-11-20T11:21:25,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=147 2024-11-20T11:21:25,501 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-11-20T11:21:25,501 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8600 sec 2024-11-20T11:21:25,502 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees in 1.8630 sec 2024-11-20T11:21:25,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:25,729 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2bb1124baa057df845a5f13f3b500be1 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T11:21:25,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=A 2024-11-20T11:21:25,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:25,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=B 2024-11-20T11:21:25,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:25,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=C 2024-11-20T11:21:25,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:25,733 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/966aa463a0f7491eaeff84f450f1af7b is 50, key is test_row_0/A:col10/1732101685728/Put/seqid=0 2024-11-20T11:21:25,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742284_1460 (size=17181) 2024-11-20T11:21:25,740 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:25,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101745734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:25,740 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:25,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101745735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:25,740 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:25,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101745735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:25,740 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:25,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101745736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:25,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-20T11:21:25,742 INFO [Thread-1798 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 146 completed 2024-11-20T11:21:25,743 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:21:25,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees 2024-11-20T11:21:25,744 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:21:25,744 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:25,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101745739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:25,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-20T11:21:25,745 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:21:25,745 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:21:25,843 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:25,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101745841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:25,843 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:25,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101745841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:25,843 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:25,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101745841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:25,844 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:25,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101745841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:25,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-20T11:21:25,847 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:25,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101745845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:25,896 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:25,897 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-20T11:21:25,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:25,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:25,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:25,897 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:25,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:25,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:26,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-20T11:21:26,046 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:26,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101746044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:26,046 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:26,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101746045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:26,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:26,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101746045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:26,049 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:26,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:26,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101746045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:26,049 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-20T11:21:26,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:26,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:26,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:26,050 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:26,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:26,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:26,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101746048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:26,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
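[Editorial note on the entries above: the handler threads are rejecting Mutate calls with org.apache.hadoop.hbase.RegionTooBusyException because the region's memstore has exceeded its blocking limit (reported here as 512.0 K, a deliberately small test setting) while a flush of 2bb1124baa057df845a5f13f3b500be1 is still in progress. The HBase client library normally retries these rejections on its own according to its retry/backoff settings, which is why the same connections reappear with new callIds. The sketch below is illustrative only and is not part of this test run; it simply makes the retry pattern explicit at the application level. The value written, the retry budget, and the backoff constants are hypothetical; the table name, row, family, and qualifier are taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            int maxAttempts = 5;   // hypothetical retry budget
            long backoffMs = 200;  // hypothetical initial backoff
            for (int attempt = 1; attempt <= maxAttempts; attempt++) {
                try {
                    table.put(put);  // may be rejected while the memstore is over its blocking limit
                    break;
                } catch (RegionTooBusyException e) {
                    if (attempt == maxAttempts) {
                        throw e;     // give up after the last attempt
                    }
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;  // simple exponential backoff between attempts
                }
            }
        }
    }
}

End of editorial note; the log continues below.]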
2024-11-20T11:21:26,141 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/966aa463a0f7491eaeff84f450f1af7b 2024-11-20T11:21:26,148 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/967e17fb11b74160b8ccebbe2c4847bc is 50, key is test_row_0/B:col10/1732101685728/Put/seqid=0 2024-11-20T11:21:26,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742285_1461 (size=12301) 2024-11-20T11:21:26,201 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:26,202 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-20T11:21:26,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:26,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:26,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:26,202 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:26,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:26,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:26,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-20T11:21:26,349 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:26,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101746347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:26,349 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:26,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101746347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:26,349 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:26,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101746348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:26,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:26,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101746351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:26,354 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:26,354 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:26,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101746352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:26,355 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-20T11:21:26,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:26,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:26,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:26,355 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:26,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:26,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:26,507 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:26,507 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-20T11:21:26,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:26,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:26,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:26,508 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:26,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:26,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
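[Editorial note on the repeated pid=149 failures above: the master keeps re-dispatching FlushRegionCallable to the region server, and each attempt fails with "Unable to complete flush ... as already flushing" until the in-flight MemStoreFlusher flush of 2bb1124baa057df845a5f13f3b500be1 finishes; the master then re-checks the parent procedure ("Checking to see if procedure is done pid=148"). A minimal sketch of how such a flush is typically requested is shown below; it is an assumption that Admin.flush is the entry point used by this test, and the commented configuration key is the standard per-region memstore flush size (the blocking limit seen in the log is this value times hbase.hregion.memstore.block.multiplier), not something set in this sketch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical override: the 512 KB blocking limit in this log is a test-only setting.
        // conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Admin.flush is synchronous for the caller; in this build the flush appears to be
            // driven through a master-side procedure with per-region callables, as seen above.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}

End of editorial note; the log continues below.]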
2024-11-20T11:21:26,552 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/967e17fb11b74160b8ccebbe2c4847bc 2024-11-20T11:21:26,559 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/2dd1f3d77834435cb77ed795a4613500 is 50, key is test_row_0/C:col10/1732101685728/Put/seqid=0 2024-11-20T11:21:26,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742286_1462 (size=12301) 2024-11-20T11:21:26,565 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/2dd1f3d77834435cb77ed795a4613500 2024-11-20T11:21:26,568 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/966aa463a0f7491eaeff84f450f1af7b as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/966aa463a0f7491eaeff84f450f1af7b 2024-11-20T11:21:26,572 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/966aa463a0f7491eaeff84f450f1af7b, entries=250, sequenceid=318, filesize=16.8 K 2024-11-20T11:21:26,573 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/967e17fb11b74160b8ccebbe2c4847bc as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/967e17fb11b74160b8ccebbe2c4847bc 2024-11-20T11:21:26,576 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/967e17fb11b74160b8ccebbe2c4847bc, entries=150, sequenceid=318, filesize=12.0 K 2024-11-20T11:21:26,577 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/2dd1f3d77834435cb77ed795a4613500 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/2dd1f3d77834435cb77ed795a4613500 2024-11-20T11:21:26,580 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/2dd1f3d77834435cb77ed795a4613500, entries=150, sequenceid=318, filesize=12.0 K 2024-11-20T11:21:26,581 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 2bb1124baa057df845a5f13f3b500be1 in 852ms, sequenceid=318, compaction requested=true 2024-11-20T11:21:26,581 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:26,581 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2bb1124baa057df845a5f13f3b500be1:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:21:26,581 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:26,581 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2bb1124baa057df845a5f13f3b500be1:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:21:26,581 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:26,581 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:26,581 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2bb1124baa057df845a5f13f3b500be1:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:21:26,581 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T11:21:26,581 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:26,582 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42431 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:26,582 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:26,582 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 2bb1124baa057df845a5f13f3b500be1/B is initiating minor compaction (all files) 2024-11-20T11:21:26,582 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 2bb1124baa057df845a5f13f3b500be1/A is initiating minor compaction (all files) 2024-11-20T11:21:26,583 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2bb1124baa057df845a5f13f3b500be1/B in TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
2024-11-20T11:21:26,583 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2bb1124baa057df845a5f13f3b500be1/A in TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:26,583 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/87330a8d017c4ac09b1f636c7b66e552, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/278a72487a4e4e0897800183ba31f8a6, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/966aa463a0f7491eaeff84f450f1af7b] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp, totalSize=41.4 K 2024-11-20T11:21:26,583 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/82ea8183cb9e4bb58328ec1d00993329, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/e504d4e4289249fbbaf568465ebd3301, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/967e17fb11b74160b8ccebbe2c4847bc] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp, totalSize=36.7 K 2024-11-20T11:21:26,583 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 87330a8d017c4ac09b1f636c7b66e552, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732101682944 2024-11-20T11:21:26,583 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 82ea8183cb9e4bb58328ec1d00993329, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732101682944 2024-11-20T11:21:26,583 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting e504d4e4289249fbbaf568465ebd3301, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732101682953 2024-11-20T11:21:26,583 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 278a72487a4e4e0897800183ba31f8a6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732101682953 2024-11-20T11:21:26,584 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 967e17fb11b74160b8ccebbe2c4847bc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732101685109 2024-11-20T11:21:26,584 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 966aa463a0f7491eaeff84f450f1af7b, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732101685109 
2024-11-20T11:21:26,590 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2bb1124baa057df845a5f13f3b500be1#B#compaction#393 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:26,591 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2bb1124baa057df845a5f13f3b500be1#A#compaction#394 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:26,591 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/e41f941994f54cf99ad199d1f7a9b8cc is 50, key is test_row_0/B:col10/1732101685728/Put/seqid=0 2024-11-20T11:21:26,591 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/0a46eca3e33a4bdaae1f29ccc7473491 is 50, key is test_row_0/A:col10/1732101685728/Put/seqid=0 2024-11-20T11:21:26,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742288_1464 (size=13051) 2024-11-20T11:21:26,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742287_1463 (size=13051) 2024-11-20T11:21:26,601 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/0a46eca3e33a4bdaae1f29ccc7473491 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/0a46eca3e33a4bdaae1f29ccc7473491 2024-11-20T11:21:26,601 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/e41f941994f54cf99ad199d1f7a9b8cc as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/e41f941994f54cf99ad199d1f7a9b8cc 2024-11-20T11:21:26,605 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2bb1124baa057df845a5f13f3b500be1/B of 2bb1124baa057df845a5f13f3b500be1 into e41f941994f54cf99ad199d1f7a9b8cc(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:21:26,605 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:26,605 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1., storeName=2bb1124baa057df845a5f13f3b500be1/B, priority=13, startTime=1732101686581; duration=0sec 2024-11-20T11:21:26,605 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:26,605 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2bb1124baa057df845a5f13f3b500be1:B 2024-11-20T11:21:26,605 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:26,606 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2bb1124baa057df845a5f13f3b500be1/A of 2bb1124baa057df845a5f13f3b500be1 into 0a46eca3e33a4bdaae1f29ccc7473491(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:21:26,606 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:26,606 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1., storeName=2bb1124baa057df845a5f13f3b500be1/A, priority=13, startTime=1732101686581; duration=0sec 2024-11-20T11:21:26,606 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:26,606 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 2bb1124baa057df845a5f13f3b500be1/C is initiating minor compaction (all files) 2024-11-20T11:21:26,606 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:26,606 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2bb1124baa057df845a5f13f3b500be1:A 2024-11-20T11:21:26,606 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2bb1124baa057df845a5f13f3b500be1/C in TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
2024-11-20T11:21:26,606 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/92f2f979abef49d6a43bed42b86ddd60, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/89ee865b03f84d16963773fb03c2fc6b, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/2dd1f3d77834435cb77ed795a4613500] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp, totalSize=36.7 K 2024-11-20T11:21:26,606 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 92f2f979abef49d6a43bed42b86ddd60, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732101682944 2024-11-20T11:21:26,607 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 89ee865b03f84d16963773fb03c2fc6b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732101682953 2024-11-20T11:21:26,607 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 2dd1f3d77834435cb77ed795a4613500, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732101685109 2024-11-20T11:21:26,613 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2bb1124baa057df845a5f13f3b500be1#C#compaction#395 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:26,613 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/24071853091b4590bbd38a8be903dbfe is 50, key is test_row_0/C:col10/1732101685728/Put/seqid=0 2024-11-20T11:21:26,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742289_1465 (size=13051) 2024-11-20T11:21:26,622 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/24071853091b4590bbd38a8be903dbfe as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/24071853091b4590bbd38a8be903dbfe 2024-11-20T11:21:26,627 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2bb1124baa057df845a5f13f3b500be1/C of 2bb1124baa057df845a5f13f3b500be1 into 24071853091b4590bbd38a8be903dbfe(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:21:26,627 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:26,627 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1., storeName=2bb1124baa057df845a5f13f3b500be1/C, priority=13, startTime=1732101686581; duration=0sec 2024-11-20T11:21:26,627 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:26,627 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2bb1124baa057df845a5f13f3b500be1:C 2024-11-20T11:21:26,659 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:26,660 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-20T11:21:26,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:26,660 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2837): Flushing 2bb1124baa057df845a5f13f3b500be1 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-20T11:21:26,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=A 2024-11-20T11:21:26,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:26,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=B 2024-11-20T11:21:26,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:26,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=C 2024-11-20T11:21:26,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:26,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/dd49a3b6227f4f28833b8ad6d19399c8 is 50, key is test_row_0/A:col10/1732101685738/Put/seqid=0 2024-11-20T11:21:26,670 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742290_1466 (size=9857) 2024-11-20T11:21:26,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-20T11:21:26,855 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:26,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:26,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:26,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101746900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:26,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:26,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101746900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:26,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:26,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101746900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:26,907 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:26,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101746901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:26,907 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:26,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101746901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:27,009 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:27,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101747007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:27,009 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:27,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101747007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:27,009 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:27,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101747007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:27,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:27,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101747008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:27,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:27,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101747008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:27,070 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/dd49a3b6227f4f28833b8ad6d19399c8 2024-11-20T11:21:27,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/552709ba4ee64d5d8530c56838265c62 is 50, key is test_row_0/B:col10/1732101685738/Put/seqid=0 2024-11-20T11:21:27,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742291_1467 (size=9857) 2024-11-20T11:21:27,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:27,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101747211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:27,214 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:27,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101747211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:27,214 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:27,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101747211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:27,214 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:27,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101747211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:27,214 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:27,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101747212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:27,481 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/552709ba4ee64d5d8530c56838265c62 2024-11-20T11:21:27,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/99e20cfc6b1841ee85956475e631e7ce is 50, key is test_row_0/C:col10/1732101685738/Put/seqid=0 2024-11-20T11:21:27,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742292_1468 (size=9857) 2024-11-20T11:21:27,494 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/99e20cfc6b1841ee85956475e631e7ce 2024-11-20T11:21:27,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/dd49a3b6227f4f28833b8ad6d19399c8 as 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/dd49a3b6227f4f28833b8ad6d19399c8 2024-11-20T11:21:27,502 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/dd49a3b6227f4f28833b8ad6d19399c8, entries=100, sequenceid=330, filesize=9.6 K 2024-11-20T11:21:27,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/552709ba4ee64d5d8530c56838265c62 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/552709ba4ee64d5d8530c56838265c62 2024-11-20T11:21:27,506 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/552709ba4ee64d5d8530c56838265c62, entries=100, sequenceid=330, filesize=9.6 K 2024-11-20T11:21:27,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/99e20cfc6b1841ee85956475e631e7ce as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/99e20cfc6b1841ee85956475e631e7ce 2024-11-20T11:21:27,512 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/99e20cfc6b1841ee85956475e631e7ce, entries=100, sequenceid=330, filesize=9.6 K 2024-11-20T11:21:27,513 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=167.72 KB/171750 for 2bb1124baa057df845a5f13f3b500be1 in 853ms, sequenceid=330, compaction requested=false 2024-11-20T11:21:27,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2538): Flush status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:27,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
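[Editor's note, not part of the log] The burst of RegionTooBusyException warnings above is the region blocking writers while this flush (pid=149) is in flight: the memstore has crossed its blocking limit (512.0 K here, i.e. the configured flush size times hbase.hregion.memstore.block.multiplier, presumably tuned well below the 128 MB default for this test), so Mutate RPCs are rejected until the ~40.25 KB flush at sequenceid=330 drains it. The stock HBase client already retries these internally (hbase.client.retries.number, hbase.client.pause); the sketch below only makes that backoff explicit and is not part of the test — the row key comes from the log, while the value, column, and retry bounds are invented.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();            // reads hbase-site.xml
    Put put = new Put(Bytes.toBytes("test_row_0"))               // row key as seen in the log
        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      long pauseMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;                                                 // accepted once the flush has drained the memstore
        } catch (IOException e) {
          // After the client's own retries, a busy region surfaces here, typically as the
          // RegionTooBusyException ("Over memstore limit=...") recorded in the log above.
          if (attempt >= 10) {
            throw e;                                             // bounded: do not retry forever
          }
          Thread.sleep(pauseMs);                                 // back off while the region flushes
          pauseMs = Math.min(pauseMs * 2, 5_000);
        }
      }
    }
  }
}

If writers hit this limit routinely outside a stress test, the usual remedies are a larger hbase.hregion.memstore.flush.size, more regions to spread the load, or slower writers; raising only the block multiplier just postpones the blocking.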
2024-11-20T11:21:27,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=149 2024-11-20T11:21:27,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=149 2024-11-20T11:21:27,515 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-11-20T11:21:27,515 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7690 sec 2024-11-20T11:21:27,517 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees in 1.7730 sec 2024-11-20T11:21:27,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:27,517 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2bb1124baa057df845a5f13f3b500be1 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-20T11:21:27,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=A 2024-11-20T11:21:27,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:27,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=B 2024-11-20T11:21:27,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:27,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=C 2024-11-20T11:21:27,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:27,531 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/4b83ed6122c440e7949202cfe06aa23a is 50, key is test_row_0/A:col10/1732101687517/Put/seqid=0 2024-11-20T11:21:27,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742293_1469 (size=12301) 2024-11-20T11:21:27,547 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:27,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101747518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:27,547 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:27,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101747519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:27,552 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:27,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101747547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:27,552 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:27,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101747547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:27,553 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:27,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101747547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:27,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:27,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101747648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:27,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:27,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101747648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:27,654 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:27,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101747653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:27,655 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:27,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101747653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:27,656 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:27,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101747653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:27,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-20T11:21:27,848 INFO [Thread-1798 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 148 completed 2024-11-20T11:21:27,850 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:21:27,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=150, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees 2024-11-20T11:21:27,851 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=150, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:21:27,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-20T11:21:27,851 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=150, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:21:27,852 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:21:27,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:27,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101747852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:27,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:27,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101747852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:27,859 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:27,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101747855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:27,859 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:27,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101747857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:27,860 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:27,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101747858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:27,896 DEBUG [Thread-1803 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x154f0f85 to 127.0.0.1:62733 2024-11-20T11:21:27,896 DEBUG [Thread-1803 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:21:27,896 DEBUG [Thread-1799 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11193a0c to 127.0.0.1:62733 2024-11-20T11:21:27,896 DEBUG [Thread-1799 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:21:27,898 DEBUG [Thread-1805 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x008a917b to 127.0.0.1:62733 2024-11-20T11:21:27,898 DEBUG [Thread-1805 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:21:27,900 DEBUG [Thread-1801 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7861b162 to 127.0.0.1:62733 2024-11-20T11:21:27,900 DEBUG [Thread-1801 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:21:27,901 DEBUG [Thread-1807 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x054c2725 to 127.0.0.1:62733 2024-11-20T11:21:27,901 DEBUG [Thread-1807 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:21:27,934 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/4b83ed6122c440e7949202cfe06aa23a 2024-11-20T11:21:27,941 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/d5d6d999830a446cbf0afedadb09c985 is 50, key is test_row_0/B:col10/1732101687517/Put/seqid=0 2024-11-20T11:21:27,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742294_1470 (size=12301) 2024-11-20T11:21:27,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-20T11:21:28,006 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:28,007 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=151 2024-11-20T11:21:28,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:28,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:28,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:28,007 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:28,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:28,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:28,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-20T11:21:28,155 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:28,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101748155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:28,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:28,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101748157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:28,159 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:28,159 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-11-20T11:21:28,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:28,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:28,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:28,159 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:28,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:28,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:28,161 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:28,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101748161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:28,162 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:28,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101748162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:28,163 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:28,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101748163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:28,311 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:28,311 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-11-20T11:21:28,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
2024-11-20T11:21:28,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:28,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:28,312 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:28,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:28,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:28,344 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/d5d6d999830a446cbf0afedadb09c985 2024-11-20T11:21:28,350 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/0ab9f190f8b64a6dbe706fb6d61eb933 is 50, key is test_row_0/C:col10/1732101687517/Put/seqid=0 2024-11-20T11:21:28,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742295_1471 (size=12301) 2024-11-20T11:21:28,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-20T11:21:28,463 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:28,464 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-11-20T11:21:28,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:28,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:28,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:28,464 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:28,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:28,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:28,616 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:28,616 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-11-20T11:21:28,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:28,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. as already flushing 2024-11-20T11:21:28,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:28,616 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:28,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:28,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:28,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:28,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49210 deadline: 1732101748657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:28,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:28,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49184 deadline: 1732101748660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:28,662 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:28,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49196 deadline: 1732101748662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:28,665 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:28,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49218 deadline: 1732101748665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:28,665 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:28,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1732101748665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:28,754 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/0ab9f190f8b64a6dbe706fb6d61eb933 2024-11-20T11:21:28,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/4b83ed6122c440e7949202cfe06aa23a as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/4b83ed6122c440e7949202cfe06aa23a 2024-11-20T11:21:28,760 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/4b83ed6122c440e7949202cfe06aa23a, entries=150, sequenceid=360, filesize=12.0 K 2024-11-20T11:21:28,761 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/d5d6d999830a446cbf0afedadb09c985 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/d5d6d999830a446cbf0afedadb09c985 2024-11-20T11:21:28,763 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/d5d6d999830a446cbf0afedadb09c985, entries=150, sequenceid=360, filesize=12.0 K 2024-11-20T11:21:28,763 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/0ab9f190f8b64a6dbe706fb6d61eb933 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/0ab9f190f8b64a6dbe706fb6d61eb933 2024-11-20T11:21:28,766 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/0ab9f190f8b64a6dbe706fb6d61eb933, entries=150, sequenceid=360, filesize=12.0 K 2024-11-20T11:21:28,766 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for 2bb1124baa057df845a5f13f3b500be1 in 1249ms, sequenceid=360, compaction requested=true 2024-11-20T11:21:28,766 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:28,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2bb1124baa057df845a5f13f3b500be1:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:21:28,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:28,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2bb1124baa057df845a5f13f3b500be1:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:21:28,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:28,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2bb1124baa057df845a5f13f3b500be1:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:21:28,767 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:28,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:28,767 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:28,767 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35209 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:28,767 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 35209 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:28,767 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 2bb1124baa057df845a5f13f3b500be1/A is initiating minor compaction (all files) 2024-11-20T11:21:28,767 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 2bb1124baa057df845a5f13f3b500be1/B is initiating minor compaction (all files) 2024-11-20T11:21:28,767 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2bb1124baa057df845a5f13f3b500be1/A in TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:28,767 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2bb1124baa057df845a5f13f3b500be1/B in TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:28,767 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/0a46eca3e33a4bdaae1f29ccc7473491, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/dd49a3b6227f4f28833b8ad6d19399c8, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/4b83ed6122c440e7949202cfe06aa23a] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp, totalSize=34.4 K 2024-11-20T11:21:28,767 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/e41f941994f54cf99ad199d1f7a9b8cc, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/552709ba4ee64d5d8530c56838265c62, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/d5d6d999830a446cbf0afedadb09c985] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp, totalSize=34.4 K 2024-11-20T11:21:28,768 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting e41f941994f54cf99ad199d1f7a9b8cc, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732101685109 2024-11-20T11:21:28,768 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a46eca3e33a4bdaae1f29ccc7473491, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732101685109 2024-11-20T11:21:28,768 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:28,768 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd49a3b6227f4f28833b8ad6d19399c8, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, 
seqNum=330, earliestPutTs=1732101685733 2024-11-20T11:21:28,768 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 552709ba4ee64d5d8530c56838265c62, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732101685733 2024-11-20T11:21:28,768 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting d5d6d999830a446cbf0afedadb09c985, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1732101686900 2024-11-20T11:21:28,768 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b83ed6122c440e7949202cfe06aa23a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1732101686900 2024-11-20T11:21:28,768 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-11-20T11:21:28,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:28,769 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2837): Flushing 2bb1124baa057df845a5f13f3b500be1 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-20T11:21:28,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=A 2024-11-20T11:21:28,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:28,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=B 2024-11-20T11:21:28,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:28,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=C 2024-11-20T11:21:28,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:28,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/68c5011bf0f94ddf8233eed8f89c1ce9 is 50, key is test_row_0/A:col10/1732101687519/Put/seqid=0 2024-11-20T11:21:28,774 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2bb1124baa057df845a5f13f3b500be1#A#compaction#403 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:28,775 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2bb1124baa057df845a5f13f3b500be1#B#compaction#404 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:28,775 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/543806f5428b4564a5307a55cce391c0 is 50, key is test_row_0/A:col10/1732101687517/Put/seqid=0 2024-11-20T11:21:28,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742296_1472 (size=9857) 2024-11-20T11:21:28,775 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/3ff33d44cdc2418db9bd1b088d7cf1a8 is 50, key is test_row_0/B:col10/1732101687517/Put/seqid=0 2024-11-20T11:21:28,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742297_1473 (size=13153) 2024-11-20T11:21:28,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742298_1474 (size=13153) 2024-11-20T11:21:28,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-20T11:21:29,176 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/68c5011bf0f94ddf8233eed8f89c1ce9 2024-11-20T11:21:29,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/c821559177664402813644a07ea15efc is 50, key is test_row_0/B:col10/1732101687519/Put/seqid=0 2024-11-20T11:21:29,182 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/543806f5428b4564a5307a55cce391c0 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/543806f5428b4564a5307a55cce391c0 2024-11-20T11:21:29,182 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/3ff33d44cdc2418db9bd1b088d7cf1a8 as 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/3ff33d44cdc2418db9bd1b088d7cf1a8 2024-11-20T11:21:29,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742299_1475 (size=9857) 2024-11-20T11:21:29,186 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2bb1124baa057df845a5f13f3b500be1/B of 2bb1124baa057df845a5f13f3b500be1 into 3ff33d44cdc2418db9bd1b088d7cf1a8(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:21:29,186 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2bb1124baa057df845a5f13f3b500be1/A of 2bb1124baa057df845a5f13f3b500be1 into 543806f5428b4564a5307a55cce391c0(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:21:29,186 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:29,186 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:29,186 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1., storeName=2bb1124baa057df845a5f13f3b500be1/A, priority=13, startTime=1732101688766; duration=0sec 2024-11-20T11:21:29,186 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1., storeName=2bb1124baa057df845a5f13f3b500be1/B, priority=13, startTime=1732101688766; duration=0sec 2024-11-20T11:21:29,186 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:29,186 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2bb1124baa057df845a5f13f3b500be1:A 2024-11-20T11:21:29,186 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:29,186 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2bb1124baa057df845a5f13f3b500be1:B 2024-11-20T11:21:29,186 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:29,187 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35209 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:29,187 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 2bb1124baa057df845a5f13f3b500be1/C is initiating minor compaction (all files) 2024-11-20T11:21:29,187 INFO 
[RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2bb1124baa057df845a5f13f3b500be1/C in TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:29,187 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/24071853091b4590bbd38a8be903dbfe, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/99e20cfc6b1841ee85956475e631e7ce, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/0ab9f190f8b64a6dbe706fb6d61eb933] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp, totalSize=34.4 K 2024-11-20T11:21:29,187 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 24071853091b4590bbd38a8be903dbfe, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732101685109 2024-11-20T11:21:29,188 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 99e20cfc6b1841ee85956475e631e7ce, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732101685733 2024-11-20T11:21:29,188 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0ab9f190f8b64a6dbe706fb6d61eb933, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1732101686900 2024-11-20T11:21:29,193 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2bb1124baa057df845a5f13f3b500be1#C#compaction#406 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:29,193 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/4506c6353db24a2687aade46acb47832 is 50, key is test_row_0/C:col10/1732101687517/Put/seqid=0 2024-11-20T11:21:29,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742300_1476 (size=13153) 2024-11-20T11:21:29,586 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/c821559177664402813644a07ea15efc 2024-11-20T11:21:29,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/213860c582bf4da0b5b238a37dc8ca47 is 50, key is test_row_0/C:col10/1732101687519/Put/seqid=0 2024-11-20T11:21:29,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742301_1477 (size=9857) 2024-11-20T11:21:29,599 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/4506c6353db24a2687aade46acb47832 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/4506c6353db24a2687aade46acb47832 2024-11-20T11:21:29,602 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2bb1124baa057df845a5f13f3b500be1/C of 2bb1124baa057df845a5f13f3b500be1 into 4506c6353db24a2687aade46acb47832(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
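The repeated RegionTooBusyException entries above ("Over memstore limit=512.0 K") are the region server's write back-pressure: HRegion.checkResources rejects mutations while the region's memstore is above its blocking threshold (the memstore flush size times hbase.hregion.memstore.block.multiplier), and the flush plus the minor compactions just logged are what bring the region back under that limit. The stock client already retries these rejections internally (hbase.client.retries.number / hbase.client.pause), so the sketch below is only illustrative of what the exception means to a caller; the class name, cell value, retry count and backoff are arbitrary, while the table, family and row names are taken from this log.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);   // may be rejected while the memstore is over its blocking limit
          break;
        } catch (IOException e) {
          // The busy signal may surface directly or wrapped by the client's own retry layer.
          boolean busy = e instanceof RegionTooBusyException
              || e.getCause() instanceof RegionTooBusyException;
          if (!busy || attempt >= 5) {
            throw e;                       // not back-pressure, or we have waited long enough
          }
          Thread.sleep(100L * attempt);    // crude backoff while the flush/compaction catches up
        }
      }
    }
  }
}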
2024-11-20T11:21:29,602 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:29,602 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1., storeName=2bb1124baa057df845a5f13f3b500be1/C, priority=13, startTime=1732101688767; duration=0sec 2024-11-20T11:21:29,603 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:29,603 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2bb1124baa057df845a5f13f3b500be1:C 2024-11-20T11:21:29,667 DEBUG [Thread-1790 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5ef40578 to 127.0.0.1:62733 2024-11-20T11:21:29,667 DEBUG [Thread-1790 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:21:29,668 DEBUG [Thread-1794 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06bc0f7c to 127.0.0.1:62733 2024-11-20T11:21:29,668 DEBUG [Thread-1794 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:21:29,668 DEBUG [Thread-1792 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x032bb71c to 127.0.0.1:62733 2024-11-20T11:21:29,668 DEBUG [Thread-1792 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:21:29,669 DEBUG [Thread-1796 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b8b6e04 to 127.0.0.1:62733 2024-11-20T11:21:29,669 DEBUG [Thread-1796 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:21:29,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:29,676 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
as already flushing 2024-11-20T11:21:29,676 DEBUG [Thread-1788 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x32c12a30 to 127.0.0.1:62733 2024-11-20T11:21:29,676 DEBUG [Thread-1788 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:21:29,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-20T11:21:29,995 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/213860c582bf4da0b5b238a37dc8ca47 2024-11-20T11:21:29,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/68c5011bf0f94ddf8233eed8f89c1ce9 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/68c5011bf0f94ddf8233eed8f89c1ce9 2024-11-20T11:21:30,002 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/68c5011bf0f94ddf8233eed8f89c1ce9, entries=100, sequenceid=366, filesize=9.6 K 2024-11-20T11:21:30,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/c821559177664402813644a07ea15efc as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/c821559177664402813644a07ea15efc 2024-11-20T11:21:30,004 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/c821559177664402813644a07ea15efc, entries=100, sequenceid=366, filesize=9.6 K 2024-11-20T11:21:30,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/213860c582bf4da0b5b238a37dc8ca47 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/213860c582bf4da0b5b238a37dc8ca47 2024-11-20T11:21:30,007 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/213860c582bf4da0b5b238a37dc8ca47, entries=100, sequenceid=366, filesize=9.6 K 2024-11-20T11:21:30,008 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=33.54 KB/34350 for 2bb1124baa057df845a5f13f3b500be1 in 1239ms, sequenceid=366, compaction requested=false 2024-11-20T11:21:30,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2538): Flush status journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:30,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:30,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=151 2024-11-20T11:21:30,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=151 2024-11-20T11:21:30,010 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-11-20T11:21:30,010 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1570 sec 2024-11-20T11:21:30,011 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees in 2.1600 sec 2024-11-20T11:21:31,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-20T11:21:31,956 INFO [Thread-1798 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 150 completed 2024-11-20T11:21:31,956 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-11-20T11:21:31,956 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 59
2024-11-20T11:21:31,956 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 55
2024-11-20T11:21:31,956 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 62
2024-11-20T11:21:31,956 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 57
2024-11-20T11:21:31,956 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 59
2024-11-20T11:21:31,956 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-20T11:21:31,956 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-20T11:21:31,956 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3234
2024-11-20T11:21:31,956 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9702 rows
2024-11-20T11:21:31,956 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3238
2024-11-20T11:21:31,956 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9714 rows
2024-11-20T11:21:31,956 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3241
2024-11-20T11:21:31,956 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9720 rows
2024-11-20T11:21:31,956 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3208
2024-11-20T11:21:31,956 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9624 rows
2024-11-20T11:21:31,956 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3235
2024-11-20T11:21:31,956 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9703 rows
2024-11-20T11:21:31,956 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-20T11:21:31,956 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x022a6e9f to 127.0.0.1:62733
2024-11-20T11:21:31,956 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T11:21:31,958 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-20T11:21:31,959 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-11-20T11:21:31,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-11-20T11:21:31,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152
2024-11-20T11:21:31,963 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732101691962"}]},"ts":"1732101691962"}
2024-11-20T11:21:31,964 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-11-20T11:21:31,966 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-11-20T11:21:31,967 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-11-20T11:21:31,967 INFO [PEWorker-3 {}]
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2bb1124baa057df845a5f13f3b500be1, UNASSIGN}] 2024-11-20T11:21:31,968 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2bb1124baa057df845a5f13f3b500be1, UNASSIGN 2024-11-20T11:21:31,968 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=2bb1124baa057df845a5f13f3b500be1, regionState=CLOSING, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:31,969 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T11:21:31,969 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=155, ppid=154, state=RUNNABLE; CloseRegionProcedure 2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666}] 2024-11-20T11:21:32,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-20T11:21:32,120 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:32,120 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(124): Close 2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:32,121 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T11:21:32,121 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1681): Closing 2bb1124baa057df845a5f13f3b500be1, disabling compactions & flushes 2024-11-20T11:21:32,121 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:32,121 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:32,121 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. after waiting 0 ms 2024-11-20T11:21:32,121 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 
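The pid=152..155 chain above is the master turning one client request into nested procedures: DisableTableProcedure schedules a CloseTableRegionsProcedure, which creates a TransitRegionStateProcedure (UNASSIGN) for region 2bb1124baa057df845a5f13f3b500be1, which in turn dispatches a CloseRegionProcedure to ee8338ed7cc0,35185, while the caller keeps polling "Checking to see if procedure is done pid=152". From the client's side all of this is driven by a single Admin call; a minimal sketch, assuming only the standard HBase Admin API and the table name from this log (the class name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // disableTable blocks until the master-side DisableTableProcedure and the
      // region close it schedules have finished, which is why the log shows the
      // client thread repeatedly checking pid=152 until the region below is closed.
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);
      }
      System.out.println("disabled: " + admin.isTableDisabled(table));
    }
  }
}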
2024-11-20T11:21:32,121 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(2837): Flushing 2bb1124baa057df845a5f13f3b500be1 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T11:21:32,121 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=A 2024-11-20T11:21:32,121 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:32,121 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=B 2024-11-20T11:21:32,121 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:32,121 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2bb1124baa057df845a5f13f3b500be1, store=C 2024-11-20T11:21:32,121 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:32,124 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/a7d8fa815a324c0cacc15cd79b9f2850 is 50, key is test_row_1/A:col10/1732101689667/Put/seqid=0 2024-11-20T11:21:32,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742302_1478 (size=9857) 2024-11-20T11:21:32,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-20T11:21:32,528 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/a7d8fa815a324c0cacc15cd79b9f2850 2024-11-20T11:21:32,534 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/4eab7e9eecf643069be72b24e0041c3b is 50, key is test_row_1/B:col10/1732101689667/Put/seqid=0 2024-11-20T11:21:32,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742303_1479 (size=9857) 2024-11-20T11:21:32,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-20T11:21:32,937 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 
{event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/4eab7e9eecf643069be72b24e0041c3b 2024-11-20T11:21:32,942 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/a5192665e9294e958b3cb05ae9124628 is 50, key is test_row_1/C:col10/1732101689667/Put/seqid=0 2024-11-20T11:21:32,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742304_1480 (size=9857) 2024-11-20T11:21:33,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-20T11:21:33,346 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/a5192665e9294e958b3cb05ae9124628 2024-11-20T11:21:33,350 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/A/a7d8fa815a324c0cacc15cd79b9f2850 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/a7d8fa815a324c0cacc15cd79b9f2850 2024-11-20T11:21:33,352 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/a7d8fa815a324c0cacc15cd79b9f2850, entries=100, sequenceid=377, filesize=9.6 K 2024-11-20T11:21:33,353 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/B/4eab7e9eecf643069be72b24e0041c3b as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/4eab7e9eecf643069be72b24e0041c3b 2024-11-20T11:21:33,355 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/4eab7e9eecf643069be72b24e0041c3b, entries=100, sequenceid=377, filesize=9.6 K 2024-11-20T11:21:33,356 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/.tmp/C/a5192665e9294e958b3cb05ae9124628 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/a5192665e9294e958b3cb05ae9124628 2024-11-20T11:21:33,358 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/a5192665e9294e958b3cb05ae9124628, entries=100, sequenceid=377, filesize=9.6 K 2024-11-20T11:21:33,359 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 2bb1124baa057df845a5f13f3b500be1 in 1238ms, sequenceid=377, compaction requested=true 2024-11-20T11:21:33,360 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/496592e6d57145319e43309bb60d2c79, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/adf8b4a1eed547c1914af0045b692e79, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/48c5613d68c143d684163a45847e115a, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/ec3e47f3c6e04a06858b5b6e4824efc2, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/8a1df6cc68874e97abbb97e78acbe3cd, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/541e3cd8d6a542789185da89fbea7783, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/603ec13e8d1d45b4a18ed2f9e8345e9a, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/95b9dd1be8864c5188df869fce94a203, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/44756857799642ca9f78d1a4f94c1234, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/1cef876d42c341d097d290c22a267aa3, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/76a356fac7db4c759e63d3d664aa076f, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/d32c57774ea742bb9124e086200e71fc, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/65e275ff722a42b6ad6744c0363139fc, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/25eb82e030504293af05423dd41df005, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/5be8548a55664fa39d5b612ea612bdfb, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/dfe89eb6df6c476c84c96131c679a9f5, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/3bfd3034b1674611816b0f29ab906ddd, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/192c27bbda9c4f55b4d6d6bbba2c997f, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/87330a8d017c4ac09b1f636c7b66e552, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/a21b3fa0d4874c3dbacce9160cdfe9e0, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/278a72487a4e4e0897800183ba31f8a6, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/966aa463a0f7491eaeff84f450f1af7b, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/0a46eca3e33a4bdaae1f29ccc7473491, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/dd49a3b6227f4f28833b8ad6d19399c8, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/4b83ed6122c440e7949202cfe06aa23a] to archive 2024-11-20T11:21:33,360 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
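The store-close sequence above does not delete the compacted-away HFiles under data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A; the HFileArchiver entries that follow show each file being moved to the same relative path under archive/, where the master's cleaner chores can later remove it once nothing such as a snapshot or replication still references it. A small sketch for inspecting such an archive directory with the Hadoop FileSystem API; the class name is illustrative and the path is passed in as an argument (the concrete archive URIs appear in the log entries below):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedStoreFiles {
  public static void main(String[] args) throws Exception {
    // Pass the archive directory of one column family, e.g. the path the
    // HFileArchiver entries archive into:
    //   .../archive/data/default/TestAcidGuarantees/<region>/A
    Path archiveCf = new Path(args[0]);
    Configuration conf = new Configuration();
    try (FileSystem fs = archiveCf.getFileSystem(conf)) {
      // Each entry is a retired store file kept until the cleaner chores expire it.
      for (FileStatus st : fs.listStatus(archiveCf)) {
        System.out.printf("%s\t%d bytes%n", st.getPath().getName(), st.getLen());
      }
    }
  }
}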
2024-11-20T11:21:33,361 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/496592e6d57145319e43309bb60d2c79 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/496592e6d57145319e43309bb60d2c79 2024-11-20T11:21:33,362 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/adf8b4a1eed547c1914af0045b692e79 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/adf8b4a1eed547c1914af0045b692e79 2024-11-20T11:21:33,363 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/48c5613d68c143d684163a45847e115a to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/48c5613d68c143d684163a45847e115a 2024-11-20T11:21:33,364 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/ec3e47f3c6e04a06858b5b6e4824efc2 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/ec3e47f3c6e04a06858b5b6e4824efc2 2024-11-20T11:21:33,365 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/8a1df6cc68874e97abbb97e78acbe3cd to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/8a1df6cc68874e97abbb97e78acbe3cd 2024-11-20T11:21:33,365 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/541e3cd8d6a542789185da89fbea7783 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/541e3cd8d6a542789185da89fbea7783 2024-11-20T11:21:33,366 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/603ec13e8d1d45b4a18ed2f9e8345e9a to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/603ec13e8d1d45b4a18ed2f9e8345e9a 2024-11-20T11:21:33,367 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/95b9dd1be8864c5188df869fce94a203 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/95b9dd1be8864c5188df869fce94a203 2024-11-20T11:21:33,367 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/44756857799642ca9f78d1a4f94c1234 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/44756857799642ca9f78d1a4f94c1234 2024-11-20T11:21:33,368 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/1cef876d42c341d097d290c22a267aa3 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/1cef876d42c341d097d290c22a267aa3 2024-11-20T11:21:33,369 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/76a356fac7db4c759e63d3d664aa076f to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/76a356fac7db4c759e63d3d664aa076f 2024-11-20T11:21:33,370 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/d32c57774ea742bb9124e086200e71fc to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/d32c57774ea742bb9124e086200e71fc 2024-11-20T11:21:33,370 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/65e275ff722a42b6ad6744c0363139fc to 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/65e275ff722a42b6ad6744c0363139fc 2024-11-20T11:21:33,371 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/25eb82e030504293af05423dd41df005 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/25eb82e030504293af05423dd41df005 2024-11-20T11:21:33,372 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/5be8548a55664fa39d5b612ea612bdfb to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/5be8548a55664fa39d5b612ea612bdfb 2024-11-20T11:21:33,373 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/dfe89eb6df6c476c84c96131c679a9f5 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/dfe89eb6df6c476c84c96131c679a9f5 2024-11-20T11:21:33,373 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/3bfd3034b1674611816b0f29ab906ddd to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/3bfd3034b1674611816b0f29ab906ddd 2024-11-20T11:21:33,374 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/192c27bbda9c4f55b4d6d6bbba2c997f to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/192c27bbda9c4f55b4d6d6bbba2c997f 2024-11-20T11:21:33,375 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/87330a8d017c4ac09b1f636c7b66e552 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/87330a8d017c4ac09b1f636c7b66e552 2024-11-20T11:21:33,376 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/a21b3fa0d4874c3dbacce9160cdfe9e0 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/a21b3fa0d4874c3dbacce9160cdfe9e0 2024-11-20T11:21:33,377 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/278a72487a4e4e0897800183ba31f8a6 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/278a72487a4e4e0897800183ba31f8a6 2024-11-20T11:21:33,377 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/966aa463a0f7491eaeff84f450f1af7b to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/966aa463a0f7491eaeff84f450f1af7b 2024-11-20T11:21:33,378 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/0a46eca3e33a4bdaae1f29ccc7473491 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/0a46eca3e33a4bdaae1f29ccc7473491 2024-11-20T11:21:33,379 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/dd49a3b6227f4f28833b8ad6d19399c8 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/dd49a3b6227f4f28833b8ad6d19399c8 2024-11-20T11:21:33,379 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/4b83ed6122c440e7949202cfe06aa23a to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/4b83ed6122c440e7949202cfe06aa23a 2024-11-20T11:21:33,381 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/8ebab740165545bcb1f43949b9b403d6, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/c863a86bf56b4999b145bbadc84cd402, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/ab5a84b814c6406cbd610cb2311b4546, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/9cd36941004740dc83d0438a59d13667, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/b66056d6716c487190fabaeb7536c952, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/41e4caec17f34246a1a6d84daa64a9a4, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/65da86d2427a48b0b0a25acf9758b97f, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/cd4bcc7bfcc248d686ad8eed61bfc9ae, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/2793d9b5840c4f82b1bbde0d68baac0c, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/bc2526279aad44f099bbf74539554994, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/115e41efb7564ee3a3bd093ece305e24, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/52d3674158764b2da6c74645f43b45aa, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/1dd5ed1720a94850a03ef20a3a3bf3c1, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/8480ddd6d62645a691784ab3585deab7, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/00fec8292328434188b9978fef6cc1ae, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/2d5b2d68dcfd419b8e8868b0594f042d, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/3ae5d44186b1438cb989e0854223039a, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/179d181f820a46708d8c6ec290b8df66, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/82ea8183cb9e4bb58328ec1d00993329, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/3efc83ccba374c42a566c0901bd146aa, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/e504d4e4289249fbbaf568465ebd3301, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/e41f941994f54cf99ad199d1f7a9b8cc, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/967e17fb11b74160b8ccebbe2c4847bc, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/552709ba4ee64d5d8530c56838265c62, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/d5d6d999830a446cbf0afedadb09c985] to archive 2024-11-20T11:21:33,381 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T11:21:33,382 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/8ebab740165545bcb1f43949b9b403d6 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/8ebab740165545bcb1f43949b9b403d6 2024-11-20T11:21:33,383 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/c863a86bf56b4999b145bbadc84cd402 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/c863a86bf56b4999b145bbadc84cd402 2024-11-20T11:21:33,384 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/ab5a84b814c6406cbd610cb2311b4546 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/ab5a84b814c6406cbd610cb2311b4546 2024-11-20T11:21:33,385 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/9cd36941004740dc83d0438a59d13667 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/9cd36941004740dc83d0438a59d13667 2024-11-20T11:21:33,386 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/b66056d6716c487190fabaeb7536c952 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/b66056d6716c487190fabaeb7536c952 2024-11-20T11:21:33,387 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/41e4caec17f34246a1a6d84daa64a9a4 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/41e4caec17f34246a1a6d84daa64a9a4 2024-11-20T11:21:33,387 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/65da86d2427a48b0b0a25acf9758b97f to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/65da86d2427a48b0b0a25acf9758b97f 2024-11-20T11:21:33,388 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/cd4bcc7bfcc248d686ad8eed61bfc9ae to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/cd4bcc7bfcc248d686ad8eed61bfc9ae 2024-11-20T11:21:33,389 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/2793d9b5840c4f82b1bbde0d68baac0c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/2793d9b5840c4f82b1bbde0d68baac0c 2024-11-20T11:21:33,390 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/bc2526279aad44f099bbf74539554994 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/bc2526279aad44f099bbf74539554994 2024-11-20T11:21:33,391 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/115e41efb7564ee3a3bd093ece305e24 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/115e41efb7564ee3a3bd093ece305e24 2024-11-20T11:21:33,392 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/52d3674158764b2da6c74645f43b45aa to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/52d3674158764b2da6c74645f43b45aa 2024-11-20T11:21:33,393 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/1dd5ed1720a94850a03ef20a3a3bf3c1 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/1dd5ed1720a94850a03ef20a3a3bf3c1 2024-11-20T11:21:33,394 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/8480ddd6d62645a691784ab3585deab7 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/8480ddd6d62645a691784ab3585deab7 2024-11-20T11:21:33,394 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/00fec8292328434188b9978fef6cc1ae to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/00fec8292328434188b9978fef6cc1ae 2024-11-20T11:21:33,395 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/2d5b2d68dcfd419b8e8868b0594f042d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/2d5b2d68dcfd419b8e8868b0594f042d 2024-11-20T11:21:33,396 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/3ae5d44186b1438cb989e0854223039a to 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/3ae5d44186b1438cb989e0854223039a 2024-11-20T11:21:33,397 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/179d181f820a46708d8c6ec290b8df66 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/179d181f820a46708d8c6ec290b8df66 2024-11-20T11:21:33,398 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/82ea8183cb9e4bb58328ec1d00993329 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/82ea8183cb9e4bb58328ec1d00993329 2024-11-20T11:21:33,398 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/3efc83ccba374c42a566c0901bd146aa to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/3efc83ccba374c42a566c0901bd146aa 2024-11-20T11:21:33,399 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/e504d4e4289249fbbaf568465ebd3301 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/e504d4e4289249fbbaf568465ebd3301 2024-11-20T11:21:33,400 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/e41f941994f54cf99ad199d1f7a9b8cc to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/e41f941994f54cf99ad199d1f7a9b8cc 2024-11-20T11:21:33,401 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/967e17fb11b74160b8ccebbe2c4847bc to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/967e17fb11b74160b8ccebbe2c4847bc 2024-11-20T11:21:33,402 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/552709ba4ee64d5d8530c56838265c62 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/552709ba4ee64d5d8530c56838265c62 2024-11-20T11:21:33,402 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/d5d6d999830a446cbf0afedadb09c985 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/d5d6d999830a446cbf0afedadb09c985 2024-11-20T11:21:33,404 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/ff99e61c52434257a548e4cce19e9c71, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/1ba4a825944c45d4bb9588c52d309067, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/3df81d2a2f874f04bc07baa4611422d6, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/ec21920b37e24af0addedf1085e658cb, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/ccd2ca8a12274fa3a4fa0c643d367628, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/eb525675cb6c4b7c9cab6b040065edfa, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/9b8c1be031834f38a6dbd230f9744a42, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/c38d333c83a74f0f846d90f00970448d, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/6b55cbccf00146419268b5548015e3c0, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/0c53b98d8d914e32be49dbc6a97a3c45, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/4c2b2ff5b72e489e9242c5788209c0e7, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/20c92f2b53c949edba4ef42dd367b941, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/10b3e3bbaa864511b84419e34bd468f4, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/1acf9291aab94307a3379a35adecc733, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/2d768d51295a4980b2696ebf57271253, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/ef5d0d791b4f4d89aa2b32b0a3bcfd12, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/fda0cbfe14f84be8b44d7912a5aa14d5, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/35967c929efb4c64a80c9d4de1c196ea, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/92f2f979abef49d6a43bed42b86ddd60, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/c699318b988141088fa31a7b9cc6e7fd, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/89ee865b03f84d16963773fb03c2fc6b, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/24071853091b4590bbd38a8be903dbfe, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/2dd1f3d77834435cb77ed795a4613500, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/99e20cfc6b1841ee85956475e631e7ce, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/0ab9f190f8b64a6dbe706fb6d61eb933] to archive 2024-11-20T11:21:33,404 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T11:21:33,405 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/ff99e61c52434257a548e4cce19e9c71 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/ff99e61c52434257a548e4cce19e9c71 2024-11-20T11:21:33,406 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/1ba4a825944c45d4bb9588c52d309067 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/1ba4a825944c45d4bb9588c52d309067 2024-11-20T11:21:33,407 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/3df81d2a2f874f04bc07baa4611422d6 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/3df81d2a2f874f04bc07baa4611422d6 2024-11-20T11:21:33,408 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/ec21920b37e24af0addedf1085e658cb to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/ec21920b37e24af0addedf1085e658cb 2024-11-20T11:21:33,409 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/ccd2ca8a12274fa3a4fa0c643d367628 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/ccd2ca8a12274fa3a4fa0c643d367628 2024-11-20T11:21:33,410 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/eb525675cb6c4b7c9cab6b040065edfa to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/eb525675cb6c4b7c9cab6b040065edfa 2024-11-20T11:21:33,410 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/9b8c1be031834f38a6dbd230f9744a42 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/9b8c1be031834f38a6dbd230f9744a42 2024-11-20T11:21:33,411 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/c38d333c83a74f0f846d90f00970448d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/c38d333c83a74f0f846d90f00970448d 2024-11-20T11:21:33,412 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/6b55cbccf00146419268b5548015e3c0 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/6b55cbccf00146419268b5548015e3c0 2024-11-20T11:21:33,413 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/0c53b98d8d914e32be49dbc6a97a3c45 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/0c53b98d8d914e32be49dbc6a97a3c45 2024-11-20T11:21:33,414 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/4c2b2ff5b72e489e9242c5788209c0e7 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/4c2b2ff5b72e489e9242c5788209c0e7 2024-11-20T11:21:33,414 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/20c92f2b53c949edba4ef42dd367b941 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/20c92f2b53c949edba4ef42dd367b941 2024-11-20T11:21:33,415 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/10b3e3bbaa864511b84419e34bd468f4 to 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/10b3e3bbaa864511b84419e34bd468f4 2024-11-20T11:21:33,416 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/1acf9291aab94307a3379a35adecc733 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/1acf9291aab94307a3379a35adecc733 2024-11-20T11:21:33,417 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/2d768d51295a4980b2696ebf57271253 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/2d768d51295a4980b2696ebf57271253 2024-11-20T11:21:33,418 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/ef5d0d791b4f4d89aa2b32b0a3bcfd12 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/ef5d0d791b4f4d89aa2b32b0a3bcfd12 2024-11-20T11:21:33,418 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/fda0cbfe14f84be8b44d7912a5aa14d5 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/fda0cbfe14f84be8b44d7912a5aa14d5 2024-11-20T11:21:33,419 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/35967c929efb4c64a80c9d4de1c196ea to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/35967c929efb4c64a80c9d4de1c196ea 2024-11-20T11:21:33,420 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/92f2f979abef49d6a43bed42b86ddd60 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/92f2f979abef49d6a43bed42b86ddd60 2024-11-20T11:21:33,421 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/c699318b988141088fa31a7b9cc6e7fd to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/c699318b988141088fa31a7b9cc6e7fd 2024-11-20T11:21:33,421 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/89ee865b03f84d16963773fb03c2fc6b to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/89ee865b03f84d16963773fb03c2fc6b 2024-11-20T11:21:33,422 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/24071853091b4590bbd38a8be903dbfe to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/24071853091b4590bbd38a8be903dbfe 2024-11-20T11:21:33,423 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/2dd1f3d77834435cb77ed795a4613500 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/2dd1f3d77834435cb77ed795a4613500 2024-11-20T11:21:33,424 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/99e20cfc6b1841ee85956475e631e7ce to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/99e20cfc6b1841ee85956475e631e7ce 2024-11-20T11:21:33,425 DEBUG [StoreCloser-TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/0ab9f190f8b64a6dbe706fb6d61eb933 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/0ab9f190f8b64a6dbe706fb6d61eb933 2024-11-20T11:21:33,428 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/recovered.edits/380.seqid, newMaxSeqId=380, maxSeqId=1 2024-11-20T11:21:33,429 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1. 2024-11-20T11:21:33,429 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1635): Region close journal for 2bb1124baa057df845a5f13f3b500be1: 2024-11-20T11:21:33,430 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(170): Closed 2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:33,430 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=2bb1124baa057df845a5f13f3b500be1, regionState=CLOSED 2024-11-20T11:21:33,432 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=155, resume processing ppid=154 2024-11-20T11:21:33,432 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, ppid=154, state=SUCCESS; CloseRegionProcedure 2bb1124baa057df845a5f13f3b500be1, server=ee8338ed7cc0,35185,1732101546666 in 1.4620 sec 2024-11-20T11:21:33,433 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=153 2024-11-20T11:21:33,433 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=153, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=2bb1124baa057df845a5f13f3b500be1, UNASSIGN in 1.4650 sec 2024-11-20T11:21:33,435 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-11-20T11:21:33,435 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4670 sec 2024-11-20T11:21:33,435 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732101693435"}]},"ts":"1732101693435"} 2024-11-20T11:21:33,436 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T11:21:33,438 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T11:21:33,439 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.4790 sec 2024-11-20T11:21:34,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-20T11:21:34,066 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 152 completed 2024-11-20T11:21:34,066 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T11:21:34,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=156, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:21:34,067 DEBUG [PEWorker-4 {}] 
procedure.DeleteTableProcedure(103): Waiting for RIT for pid=156, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:21:34,068 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=156, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:21:34,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-11-20T11:21:34,070 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:34,071 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A, FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B, FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C, FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/recovered.edits] 2024-11-20T11:21:34,073 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/543806f5428b4564a5307a55cce391c0 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/543806f5428b4564a5307a55cce391c0 2024-11-20T11:21:34,074 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/68c5011bf0f94ddf8233eed8f89c1ce9 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/68c5011bf0f94ddf8233eed8f89c1ce9 2024-11-20T11:21:34,075 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/a7d8fa815a324c0cacc15cd79b9f2850 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/A/a7d8fa815a324c0cacc15cd79b9f2850 2024-11-20T11:21:34,076 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/3ff33d44cdc2418db9bd1b088d7cf1a8 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/3ff33d44cdc2418db9bd1b088d7cf1a8 2024-11-20T11:21:34,077 DEBUG [HFileArchiver-5 {}] 
backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/4eab7e9eecf643069be72b24e0041c3b to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/4eab7e9eecf643069be72b24e0041c3b 2024-11-20T11:21:34,078 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/c821559177664402813644a07ea15efc to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/B/c821559177664402813644a07ea15efc 2024-11-20T11:21:34,079 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/213860c582bf4da0b5b238a37dc8ca47 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/213860c582bf4da0b5b238a37dc8ca47 2024-11-20T11:21:34,080 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/4506c6353db24a2687aade46acb47832 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/4506c6353db24a2687aade46acb47832 2024-11-20T11:21:34,081 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/a5192665e9294e958b3cb05ae9124628 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/C/a5192665e9294e958b3cb05ae9124628 2024-11-20T11:21:34,083 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/recovered.edits/380.seqid to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1/recovered.edits/380.seqid 2024-11-20T11:21:34,084 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2bb1124baa057df845a5f13f3b500be1 2024-11-20T11:21:34,084 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T11:21:34,085 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=156, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:21:34,086 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from 
hbase:meta 2024-11-20T11:21:34,088 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T11:21:34,088 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=156, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:21:34,089 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T11:21:34,089 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732101694089"}]},"ts":"9223372036854775807"} 2024-11-20T11:21:34,090 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T11:21:34,090 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 2bb1124baa057df845a5f13f3b500be1, NAME => 'TestAcidGuarantees,,1732101665738.2bb1124baa057df845a5f13f3b500be1.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T11:21:34,090 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T11:21:34,090 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732101694090"}]},"ts":"9223372036854775807"} 2024-11-20T11:21:34,091 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T11:21:34,093 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=156, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:21:34,094 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 27 msec 2024-11-20T11:21:34,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-11-20T11:21:34,169 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 156 completed 2024-11-20T11:21:34,178 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=241 (was 239) - Thread LEAK? -, OpenFileDescriptor=453 (was 451) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=268 (was 284), ProcessCount=11 (was 11), AvailableMemoryMB=5759 (was 5796) 2024-11-20T11:21:34,188 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=241, OpenFileDescriptor=453, MaxFileDescriptor=1048576, SystemLoadAverage=268, ProcessCount=11, AvailableMemoryMB=5759 2024-11-20T11:21:34,189 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
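
[Editor's note] The DELETE operation that completes above (procId 156) is the master-side DeleteTableProcedure, which archives the region's HFiles and removes it from hbase:meta. What follows is a minimal, hedged sketch of how a client could trigger the same operation through the HBase Admin API; only the table name comes from the log, while the connection setup and the class name DeleteTableExample are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // A table must be disabled before it can be deleted; the master then runs
      // DeleteTableProcedure, archiving store files and deleting the region rows
      // from hbase:meta, as shown in the log entries above.
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table);
        }
        admin.deleteTable(table);
      }
    }
  }
}
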
2024-11-20T11:21:34,189 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T11:21:34,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=157, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T11:21:34,190 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T11:21:34,191 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:34,191 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 157 2024-11-20T11:21:34,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T11:21:34,191 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T11:21:34,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742305_1481 (size=960) 2024-11-20T11:21:34,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T11:21:34,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T11:21:34,598 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830 2024-11-20T11:21:34,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742306_1482 (size=53) 2024-11-20T11:21:34,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T11:21:35,003 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T11:21:35,003 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 2c782dcb9cddcab95f8c562ac4eee43c, disabling compactions & flushes 2024-11-20T11:21:35,003 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:35,003 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:35,003 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. after waiting 0 ms 2024-11-20T11:21:35,003 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:35,003 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
2024-11-20T11:21:35,003 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:35,004 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T11:21:35,005 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732101695004"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732101695004"}]},"ts":"1732101695004"} 2024-11-20T11:21:35,005 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T11:21:35,006 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T11:21:35,006 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732101695006"}]},"ts":"1732101695006"} 2024-11-20T11:21:35,006 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T11:21:35,011 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2c782dcb9cddcab95f8c562ac4eee43c, ASSIGN}] 2024-11-20T11:21:35,012 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2c782dcb9cddcab95f8c562ac4eee43c, ASSIGN 2024-11-20T11:21:35,013 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=2c782dcb9cddcab95f8c562ac4eee43c, ASSIGN; state=OFFLINE, location=ee8338ed7cc0,35185,1732101546666; forceNewPlan=false, retain=false 2024-11-20T11:21:35,056 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
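
[Editor's note] The CreateTableProcedure above (pid=157) creates 'TestAcidGuarantees' with column families A, B and C, VERSIONS => '1', and the table attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC'. A minimal sketch of issuing an equivalent create through TableDescriptorBuilder and ColumnFamilyDescriptorBuilder follows; the Admin handle is assumed to be obtained as in the earlier delete sketch, and the helper class/method names are illustrative.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateAcidTableSketch {
  // Creates the table roughly as shown in the master's create request above.
  static void createTestAcidGuarantees(Admin admin) throws IOException {
    TableDescriptorBuilder builder = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        // TABLE_ATTRIBUTES => METADATA 'hbase.hregion.compacting.memstore.type' => 'BASIC'
        .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
    for (String family : new String[] {"A", "B", "C"}) {
      builder.setColumnFamily(ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1)      // VERSIONS => '1'
          .setBlocksize(65536)    // BLOCKSIZE => '65536'
          .build());
    }
    admin.createTable(builder.build());
  }
}
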
2024-11-20T11:21:35,163 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=2c782dcb9cddcab95f8c562ac4eee43c, regionState=OPENING, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:35,164 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE; OpenRegionProcedure 2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666}] 2024-11-20T11:21:35,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T11:21:35,315 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:35,318 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:35,318 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(7285): Opening region: {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} 2024-11-20T11:21:35,318 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:35,318 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T11:21:35,318 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(7327): checking encryption for 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:35,318 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(7330): checking classloading for 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:35,319 INFO [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:35,320 INFO [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T11:21:35,320 INFO [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single 
output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c782dcb9cddcab95f8c562ac4eee43c columnFamilyName A 2024-11-20T11:21:35,321 DEBUG [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:35,321 INFO [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] regionserver.HStore(327): Store=2c782dcb9cddcab95f8c562ac4eee43c/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:21:35,321 INFO [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:35,322 INFO [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T11:21:35,322 INFO [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c782dcb9cddcab95f8c562ac4eee43c columnFamilyName B 2024-11-20T11:21:35,322 DEBUG [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:35,322 INFO [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] regionserver.HStore(327): Store=2c782dcb9cddcab95f8c562ac4eee43c/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:21:35,322 INFO [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:35,323 INFO [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T11:21:35,323 INFO [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c782dcb9cddcab95f8c562ac4eee43c columnFamilyName C 2024-11-20T11:21:35,323 DEBUG [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:35,324 INFO [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] regionserver.HStore(327): Store=2c782dcb9cddcab95f8c562ac4eee43c/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:21:35,324 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:35,324 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:35,324 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:35,326 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-20T11:21:35,326 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(1085): writing seq id for 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:35,328 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T11:21:35,328 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(1102): Opened 2c782dcb9cddcab95f8c562ac4eee43c; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58772819, jitterRate=-0.12421675026416779}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T11:21:35,329 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(1001): Region open journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:35,329 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c., pid=159, masterSystemTime=1732101695315 2024-11-20T11:21:35,330 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:35,330 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
2024-11-20T11:21:35,331 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=2c782dcb9cddcab95f8c562ac4eee43c, regionState=OPEN, openSeqNum=2, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:35,332 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=159, resume processing ppid=158 2024-11-20T11:21:35,332 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, ppid=158, state=SUCCESS; OpenRegionProcedure 2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 in 167 msec 2024-11-20T11:21:35,333 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=157 2024-11-20T11:21:35,333 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=157, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=2c782dcb9cddcab95f8c562ac4eee43c, ASSIGN in 321 msec 2024-11-20T11:21:35,334 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T11:21:35,334 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732101695334"}]},"ts":"1732101695334"} 2024-11-20T11:21:35,335 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T11:21:35,337 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T11:21:35,338 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1480 sec 2024-11-20T11:21:36,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T11:21:36,295 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 157 completed 2024-11-20T11:21:36,296 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6d9954b7 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3fb684eb 2024-11-20T11:21:36,301 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@537a66f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:21:36,302 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:21:36,303 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59548, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:21:36,304 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T11:21:36,305 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43762, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T11:21:36,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T11:21:36,306 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T11:21:36,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=160, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-20T11:21:36,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742307_1483 (size=996) 2024-11-20T11:21:36,716 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-20T11:21:36,716 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-20T11:21:36,717 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=161, ppid=160, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T11:21:36,719 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2c782dcb9cddcab95f8c562ac4eee43c, REOPEN/MOVE}] 2024-11-20T11:21:36,719 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=162, ppid=161, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2c782dcb9cddcab95f8c562ac4eee43c, REOPEN/MOVE 2024-11-20T11:21:36,720 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=162 updating hbase:meta row=2c782dcb9cddcab95f8c562ac4eee43c, regionState=CLOSING, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:36,720 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T11:21:36,721 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE; CloseRegionProcedure 2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666}] 2024-11-20T11:21:36,872 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:36,872 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] handler.UnassignRegionHandler(124): Close 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:36,872 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T11:21:36,872 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1681): Closing 2c782dcb9cddcab95f8c562ac4eee43c, disabling compactions & flushes 2024-11-20T11:21:36,872 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:36,872 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:36,872 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. after waiting 0 ms 2024-11-20T11:21:36,872 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
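
[Editor's note] The ModifyTableProcedure above (pid=160) switches column family A to MOB storage (IS_MOB => 'true', MOB_THRESHOLD => '4') ahead of the testMobGetAtomicity run, and the region is closed and reopened (pids 161-164) to pick up the new descriptor. A hedged sketch of the same modification via the Admin API, assuming an existing Admin handle and reusing only the values shown in the log:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class EnableMobSketch {
  static void enableMobOnFamilyA(Admin admin) throws IOException {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    TableDescriptor current = admin.getDescriptor(table);
    ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder
        .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
        .setMobEnabled(true)   // IS_MOB => 'true'
        .setMobThreshold(4L)   // MOB_THRESHOLD => '4'
        .build();
    // The master runs ModifyTableProcedure and reopens the region, as in the log above.
    admin.modifyTable(TableDescriptorBuilder.newBuilder(current)
        .modifyColumnFamily(mobA)
        .build());
  }
}
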
2024-11-20T11:21:36,875 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-20T11:21:36,876 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:36,876 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1635): Region close journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:36,876 WARN [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegionServer(3786): Not adding moved region record: 2c782dcb9cddcab95f8c562ac4eee43c to self. 2024-11-20T11:21:36,877 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] handler.UnassignRegionHandler(170): Closed 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:36,877 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=162 updating hbase:meta row=2c782dcb9cddcab95f8c562ac4eee43c, regionState=CLOSED 2024-11-20T11:21:36,879 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=163, resume processing ppid=162 2024-11-20T11:21:36,879 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, ppid=162, state=SUCCESS; CloseRegionProcedure 2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 in 157 msec 2024-11-20T11:21:36,879 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=162, ppid=161, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=2c782dcb9cddcab95f8c562ac4eee43c, REOPEN/MOVE; state=CLOSED, location=ee8338ed7cc0,35185,1732101546666; forceNewPlan=false, retain=true 2024-11-20T11:21:37,030 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=162 updating hbase:meta row=2c782dcb9cddcab95f8c562ac4eee43c, regionState=OPENING, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:37,031 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=162, state=RUNNABLE; OpenRegionProcedure 2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666}] 2024-11-20T11:21:37,182 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:37,185 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
2024-11-20T11:21:37,185 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] regionserver.HRegion(7285): Opening region: {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} 2024-11-20T11:21:37,185 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:37,185 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T11:21:37,185 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] regionserver.HRegion(7327): checking encryption for 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:37,185 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] regionserver.HRegion(7330): checking classloading for 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:37,186 INFO [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:37,187 INFO [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T11:21:37,187 INFO [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c782dcb9cddcab95f8c562ac4eee43c columnFamilyName A 2024-11-20T11:21:37,188 DEBUG [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:37,189 INFO [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] regionserver.HStore(327): Store=2c782dcb9cddcab95f8c562ac4eee43c/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:21:37,189 INFO [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:37,189 INFO [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T11:21:37,189 INFO [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c782dcb9cddcab95f8c562ac4eee43c columnFamilyName B 2024-11-20T11:21:37,190 DEBUG [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:37,190 INFO [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] regionserver.HStore(327): Store=2c782dcb9cddcab95f8c562ac4eee43c/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:21:37,190 INFO [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:37,190 INFO [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-20T11:21:37,190 INFO [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c782dcb9cddcab95f8c562ac4eee43c columnFamilyName C 2024-11-20T11:21:37,191 DEBUG [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:37,191 INFO [StoreOpener-2c782dcb9cddcab95f8c562ac4eee43c-1 {}] regionserver.HStore(327): Store=2c782dcb9cddcab95f8c562ac4eee43c/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T11:21:37,191 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:37,192 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:37,192 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:37,193 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T11:21:37,194 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] regionserver.HRegion(1085): writing seq id for 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:37,195 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] regionserver.HRegion(1102): Opened 2c782dcb9cddcab95f8c562ac4eee43c; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67745010, jitterRate=0.009479314088821411}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T11:21:37,195 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] regionserver.HRegion(1001): Region open journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:37,196 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c., pid=164, masterSystemTime=1732101697182 2024-11-20T11:21:37,197 DEBUG [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:37,197 INFO [RS_OPEN_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
2024-11-20T11:21:37,197 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=162 updating hbase:meta row=2c782dcb9cddcab95f8c562ac4eee43c, regionState=OPEN, openSeqNum=5, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:37,199 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=162 2024-11-20T11:21:37,199 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=162, state=SUCCESS; OpenRegionProcedure 2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 in 167 msec 2024-11-20T11:21:37,200 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-11-20T11:21:37,200 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=2c782dcb9cddcab95f8c562ac4eee43c, REOPEN/MOVE in 480 msec 2024-11-20T11:21:37,201 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=161, resume processing ppid=160 2024-11-20T11:21:37,201 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, ppid=160, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 483 msec 2024-11-20T11:21:37,202 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 894 msec 2024-11-20T11:21:37,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=160 2024-11-20T11:21:37,204 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d5efb7a to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@644b7e6 2024-11-20T11:21:37,207 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6094c70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:21:37,208 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7fc332d8 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5c9b5141 2024-11-20T11:21:37,210 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@103dfc6e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:21:37,211 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17327621 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11a52cdf 2024-11-20T11:21:37,214 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e047c09, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:21:37,215 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1584f18a 
to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2d7fe431 2024-11-20T11:21:37,217 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60d631a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:21:37,218 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5b914bf4 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@91d72db 2024-11-20T11:21:37,222 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58971172, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:21:37,223 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5d836f78 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d7fe93b 2024-11-20T11:21:37,226 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7846cb78, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:21:37,227 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53305d9b to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11c440f7 2024-11-20T11:21:37,230 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f1754bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:21:37,231 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6bb6288a to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@58460ef3 2024-11-20T11:21:37,235 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d9113f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:21:37,235 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06556601 to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e8cd1ae 2024-11-20T11:21:37,238 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bb75907, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:21:37,239 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x458a85fd to 127.0.0.1:62733 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4d832d43 2024-11-20T11:21:37,241 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c1d3a95, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T11:21:37,246 DEBUG [hconnection-0x4d4eccaa-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:21:37,246 DEBUG [hconnection-0x76c498a9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:21:37,247 DEBUG [hconnection-0x43c7b1bd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:21:37,247 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59560, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:21:37,247 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59574, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:21:37,247 DEBUG [hconnection-0x662357c7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:21:37,248 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59582, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:21:37,249 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59594, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:21:37,249 DEBUG [hconnection-0x3b8481e3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:21:37,250 DEBUG [hconnection-0xabbf740-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:21:37,250 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59602, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:21:37,250 DEBUG [hconnection-0x226f705c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:21:37,250 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59616, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:21:37,251 DEBUG [hconnection-0x7d54c60-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:21:37,251 DEBUG [hconnection-0xd0fa45b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:21:37,252 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59626, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-20T11:21:37,252 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59618, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:21:37,252 DEBUG [hconnection-0x128e2803-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T11:21:37,252 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59634, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:21:37,253 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59638, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T11:21:37,256 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:21:37,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:37,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-11-20T11:21:37,257 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:21:37,258 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:21:37,258 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:21:37,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T11:21:37,259 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2c782dcb9cddcab95f8c562ac4eee43c 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T11:21:37,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=A 2024-11-20T11:21:37,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:37,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=B 2024-11-20T11:21:37,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:37,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=C 2024-11-20T11:21:37,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:37,275 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore 
size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:37,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101757272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:37,275 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:37,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101757274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:37,276 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:37,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101757274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:37,276 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:37,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101757274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:37,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:37,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101757280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:37,295 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205ee386c457014cbeb563abfb6634a485_2c782dcb9cddcab95f8c562ac4eee43c is 50, key is test_row_0/A:col10/1732101697257/Put/seqid=0 2024-11-20T11:21:37,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742308_1484 (size=12154) 2024-11-20T11:21:37,302 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:37,306 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205ee386c457014cbeb563abfb6634a485_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205ee386c457014cbeb563abfb6634a485_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:37,306 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/d177dffdcbce4dcea5265763c81ec67d, store: [table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:37,307 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/d177dffdcbce4dcea5265763c81ec67d is 175, key is test_row_0/A:col10/1732101697257/Put/seqid=0 2024-11-20T11:21:37,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742309_1485 (size=30955) 2024-11-20T11:21:37,314 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=21, memsize=31.3 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/d177dffdcbce4dcea5265763c81ec67d 2024-11-20T11:21:37,335 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/669c44d92ee74cc1aaa8c2409441b1c6 is 50, key is test_row_0/B:col10/1732101697257/Put/seqid=0 2024-11-20T11:21:37,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742310_1486 (size=12001) 2024-11-20T11:21:37,339 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/669c44d92ee74cc1aaa8c2409441b1c6 2024-11-20T11:21:37,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T11:21:37,361 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/d40935d20da44ebb8ab4303d47d3ea2a is 50, key is test_row_0/C:col10/1732101697257/Put/seqid=0 2024-11-20T11:21:37,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742311_1487 (size=12001) 2024-11-20T11:21:37,365 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/d40935d20da44ebb8ab4303d47d3ea2a 2024-11-20T11:21:37,370 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/d177dffdcbce4dcea5265763c81ec67d as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/d177dffdcbce4dcea5265763c81ec67d 2024-11-20T11:21:37,374 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/d177dffdcbce4dcea5265763c81ec67d, entries=150, sequenceid=21, filesize=30.2 K 2024-11-20T11:21:37,377 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:37,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101757376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:37,377 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:37,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101757377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:37,378 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:37,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101757377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:37,380 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:37,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101757379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:37,382 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/669c44d92ee74cc1aaa8c2409441b1c6 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/669c44d92ee74cc1aaa8c2409441b1c6 2024-11-20T11:21:37,382 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:37,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101757382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:37,386 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/669c44d92ee74cc1aaa8c2409441b1c6, entries=150, sequenceid=21, filesize=11.7 K 2024-11-20T11:21:37,386 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/d40935d20da44ebb8ab4303d47d3ea2a as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/d40935d20da44ebb8ab4303d47d3ea2a 2024-11-20T11:21:37,390 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/d40935d20da44ebb8ab4303d47d3ea2a, entries=150, sequenceid=21, filesize=11.7 K 2024-11-20T11:21:37,391 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=120.76 KB/123660 for 2c782dcb9cddcab95f8c562ac4eee43c in 132ms, sequenceid=21, compaction requested=false 2024-11-20T11:21:37,391 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:37,409 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:37,410 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-20T11:21:37,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
2024-11-20T11:21:37,410 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing 2c782dcb9cddcab95f8c562ac4eee43c 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T11:21:37,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=A 2024-11-20T11:21:37,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:37,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=B 2024-11-20T11:21:37,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:37,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=C 2024-11-20T11:21:37,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:37,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d17ca5d07f3543f6b7443234c857fc7c_2c782dcb9cddcab95f8c562ac4eee43c is 50, key is test_row_0/A:col10/1732101697272/Put/seqid=0 2024-11-20T11:21:37,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742312_1488 (size=12154) 2024-11-20T11:21:37,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T11:21:37,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:37,580 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:37,592 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:37,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101757589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:37,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:37,592 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:37,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101757589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:37,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101757590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:37,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:37,592 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:37,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101757590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:37,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101757590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:37,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:37,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:37,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101757693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:37,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101757693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:37,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:37,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101757693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:37,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:37,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:37,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101757693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:37,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101757694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:37,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:37,825 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d17ca5d07f3543f6b7443234c857fc7c_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d17ca5d07f3543f6b7443234c857fc7c_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:37,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/7f5aa82f77894bc8b6c7a048861a123a, store: [table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:37,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/7f5aa82f77894bc8b6c7a048861a123a is 175, key is test_row_0/A:col10/1732101697272/Put/seqid=0 2024-11-20T11:21:37,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742313_1489 (size=30955) 2024-11-20T11:21:37,831 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=40.3 K, 
hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/7f5aa82f77894bc8b6c7a048861a123a 2024-11-20T11:21:37,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/8e234df5e5494253ad694f95bfc1d3b5 is 50, key is test_row_0/B:col10/1732101697272/Put/seqid=0 2024-11-20T11:21:37,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742314_1490 (size=12001) 2024-11-20T11:21:37,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T11:21:37,898 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:37,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101757896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:37,898 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:37,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101757896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:37,898 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:37,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101757897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:37,899 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:37,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:37,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101757897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:37,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101757897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:38,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:38,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101758199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:38,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:38,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101758199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:38,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:38,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101758199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:38,202 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:38,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101758201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:38,202 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:38,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101758202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:38,241 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/8e234df5e5494253ad694f95bfc1d3b5 2024-11-20T11:21:38,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/bcbf4686a7e94ed0a8f61ab61451aa90 is 50, key is test_row_0/C:col10/1732101697272/Put/seqid=0 2024-11-20T11:21:38,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742315_1491 (size=12001) 2024-11-20T11:21:38,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T11:21:38,652 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/bcbf4686a7e94ed0a8f61ab61451aa90 2024-11-20T11:21:38,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/7f5aa82f77894bc8b6c7a048861a123a as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/7f5aa82f77894bc8b6c7a048861a123a 2024-11-20T11:21:38,659 INFO 
[RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/7f5aa82f77894bc8b6c7a048861a123a, entries=150, sequenceid=42, filesize=30.2 K 2024-11-20T11:21:38,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/8e234df5e5494253ad694f95bfc1d3b5 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/8e234df5e5494253ad694f95bfc1d3b5 2024-11-20T11:21:38,663 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/8e234df5e5494253ad694f95bfc1d3b5, entries=150, sequenceid=42, filesize=11.7 K 2024-11-20T11:21:38,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/bcbf4686a7e94ed0a8f61ab61451aa90 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/bcbf4686a7e94ed0a8f61ab61451aa90 2024-11-20T11:21:38,668 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/bcbf4686a7e94ed0a8f61ab61451aa90, entries=150, sequenceid=42, filesize=11.7 K 2024-11-20T11:21:38,668 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 2c782dcb9cddcab95f8c562ac4eee43c in 1258ms, sequenceid=42, compaction requested=false 2024-11-20T11:21:38,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:38,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
2024-11-20T11:21:38,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-11-20T11:21:38,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-11-20T11:21:38,671 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-11-20T11:21:38,671 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4110 sec 2024-11-20T11:21:38,672 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 1.4150 sec 2024-11-20T11:21:38,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:38,702 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2c782dcb9cddcab95f8c562ac4eee43c 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-20T11:21:38,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=A 2024-11-20T11:21:38,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:38,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=B 2024-11-20T11:21:38,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:38,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=C 2024-11-20T11:21:38,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:38,709 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120936ac1273f5747308cdcb595bf4a78d9_2c782dcb9cddcab95f8c562ac4eee43c is 50, key is test_row_0/A:col10/1732101697588/Put/seqid=0 2024-11-20T11:21:38,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742316_1492 (size=12154) 2024-11-20T11:21:38,717 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:38,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101758714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:38,718 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:38,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101758714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:38,718 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:38,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101758715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:38,718 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:38,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101758715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:38,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:38,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101758716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:38,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:38,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101758819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:38,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:38,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101758819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:38,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:38,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101758819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:38,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:38,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101758819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:38,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:38,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101758819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:38,954 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T11:21:39,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:39,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101759021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:39,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:39,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101759021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:39,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:39,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101759021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:39,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:39,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101759022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:39,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:39,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101759022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:39,113 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:39,117 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120936ac1273f5747308cdcb595bf4a78d9_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120936ac1273f5747308cdcb595bf4a78d9_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:39,118 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/783f7a5671c84de3999f583d7db9e667, store: [table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:39,119 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/783f7a5671c84de3999f583d7db9e667 is 175, key is test_row_0/A:col10/1732101697588/Put/seqid=0 2024-11-20T11:21:39,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742317_1493 (size=30955) 2024-11-20T11:21:39,124 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=60, memsize=33.5 
K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/783f7a5671c84de3999f583d7db9e667 2024-11-20T11:21:39,131 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/ca4890464c004a189076f8f409fbbc03 is 50, key is test_row_0/B:col10/1732101697588/Put/seqid=0 2024-11-20T11:21:39,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742318_1494 (size=12001) 2024-11-20T11:21:39,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:39,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101759324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:39,326 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:39,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101759324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:39,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:39,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101759325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:39,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:39,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101759326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:39,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:39,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101759327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:39,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T11:21:39,363 INFO [Thread-2175 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-11-20T11:21:39,364 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:21:39,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-11-20T11:21:39,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T11:21:39,365 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:21:39,366 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:21:39,366 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:21:39,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T11:21:39,517 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:39,518 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T11:21:39,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
2024-11-20T11:21:39,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:39,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:39,518 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:39,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:39,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:39,535 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/ca4890464c004a189076f8f409fbbc03 2024-11-20T11:21:39,542 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/67b6706c38a04fce8f330d2e08430fc3 is 50, key is test_row_0/C:col10/1732101697588/Put/seqid=0 2024-11-20T11:21:39,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742319_1495 (size=12001) 2024-11-20T11:21:39,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T11:21:39,670 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:39,671 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T11:21:39,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:39,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:39,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:39,671 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:39,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:39,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:39,823 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:39,824 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T11:21:39,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:39,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:39,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:39,824 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:39,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:39,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:39,827 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:39,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101759826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:39,830 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:39,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101759829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:39,831 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:39,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101759829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:39,832 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:39,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101759830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:39,834 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:39,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101759833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:39,950 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/67b6706c38a04fce8f330d2e08430fc3 2024-11-20T11:21:39,955 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/783f7a5671c84de3999f583d7db9e667 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/783f7a5671c84de3999f583d7db9e667 2024-11-20T11:21:39,958 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/783f7a5671c84de3999f583d7db9e667, entries=150, sequenceid=60, filesize=30.2 K 2024-11-20T11:21:39,959 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/ca4890464c004a189076f8f409fbbc03 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/ca4890464c004a189076f8f409fbbc03 2024-11-20T11:21:39,962 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/ca4890464c004a189076f8f409fbbc03, entries=150, sequenceid=60, filesize=11.7 K 2024-11-20T11:21:39,963 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/67b6706c38a04fce8f330d2e08430fc3 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/67b6706c38a04fce8f330d2e08430fc3 2024-11-20T11:21:39,967 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/67b6706c38a04fce8f330d2e08430fc3, entries=150, sequenceid=60, filesize=11.7 K 2024-11-20T11:21:39,967 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 2c782dcb9cddcab95f8c562ac4eee43c in 1265ms, sequenceid=60, compaction requested=true 2024-11-20T11:21:39,967 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:39,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T11:21:39,968 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:39,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c782dcb9cddcab95f8c562ac4eee43c:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:21:39,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:39,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c782dcb9cddcab95f8c562ac4eee43c:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:21:39,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:39,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c782dcb9cddcab95f8c562ac4eee43c:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:21:39,968 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:39,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:39,968 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 
2024-11-20T11:21:39,969 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 2c782dcb9cddcab95f8c562ac4eee43c/A is initiating minor compaction (all files) 2024-11-20T11:21:39,969 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c782dcb9cddcab95f8c562ac4eee43c/A in TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:39,969 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/d177dffdcbce4dcea5265763c81ec67d, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/7f5aa82f77894bc8b6c7a048861a123a, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/783f7a5671c84de3999f583d7db9e667] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp, totalSize=90.7 K 2024-11-20T11:21:39,969 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:39,969 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. files: [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/d177dffdcbce4dcea5265763c81ec67d, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/7f5aa82f77894bc8b6c7a048861a123a, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/783f7a5671c84de3999f583d7db9e667] 2024-11-20T11:21:39,969 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:39,969 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting d177dffdcbce4dcea5265763c81ec67d, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1732101697256 2024-11-20T11:21:39,969 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 2c782dcb9cddcab95f8c562ac4eee43c/B is initiating minor compaction (all files) 2024-11-20T11:21:39,969 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c782dcb9cddcab95f8c562ac4eee43c/B in TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
2024-11-20T11:21:39,969 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/669c44d92ee74cc1aaa8c2409441b1c6, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/8e234df5e5494253ad694f95bfc1d3b5, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/ca4890464c004a189076f8f409fbbc03] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp, totalSize=35.2 K 2024-11-20T11:21:39,969 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f5aa82f77894bc8b6c7a048861a123a, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732101697271 2024-11-20T11:21:39,970 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 669c44d92ee74cc1aaa8c2409441b1c6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1732101697256 2024-11-20T11:21:39,970 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 783f7a5671c84de3999f583d7db9e667, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1732101697588 2024-11-20T11:21:39,970 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8e234df5e5494253ad694f95bfc1d3b5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732101697271 2024-11-20T11:21:39,970 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting ca4890464c004a189076f8f409fbbc03, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1732101697588 2024-11-20T11:21:39,976 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:39,976 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T11:21:39,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
2024-11-20T11:21:39,977 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing 2c782dcb9cddcab95f8c562ac4eee43c 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-20T11:21:39,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=A 2024-11-20T11:21:39,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:39,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=B 2024-11-20T11:21:39,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:39,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=C 2024-11-20T11:21:39,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:39,978 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:39,981 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c782dcb9cddcab95f8c562ac4eee43c#B#compaction#421 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:39,981 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/ced9de796ade4769bdd77067413bf73e is 50, key is test_row_0/B:col10/1732101697588/Put/seqid=0 2024-11-20T11:21:39,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120436855d4d5ec4007932a892a6fe7063e_2c782dcb9cddcab95f8c562ac4eee43c is 50, key is test_row_0/A:col10/1732101698715/Put/seqid=0 2024-11-20T11:21:39,985 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112067ce3bfbc8d7476e95ca61ca32ec7cab_2c782dcb9cddcab95f8c562ac4eee43c store=[table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:39,987 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112067ce3bfbc8d7476e95ca61ca32ec7cab_2c782dcb9cddcab95f8c562ac4eee43c, store=[table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:39,987 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112067ce3bfbc8d7476e95ca61ca32ec7cab_2c782dcb9cddcab95f8c562ac4eee43c because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:39,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742321_1497 (size=12154) 2024-11-20T11:21:39,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742320_1496 (size=12104) 2024-11-20T11:21:39,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,000 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/ced9de796ade4769bdd77067413bf73e as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/ced9de796ade4769bdd77067413bf73e 2024-11-20T11:21:40,001 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120436855d4d5ec4007932a892a6fe7063e_2c782dcb9cddcab95f8c562ac4eee43c to 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120436855d4d5ec4007932a892a6fe7063e_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:40,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/d14bee3fb2bc41bab5f6e975ce661402, store: [table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:40,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/d14bee3fb2bc41bab5f6e975ce661402 is 175, key is test_row_0/A:col10/1732101698715/Put/seqid=0 2024-11-20T11:21:40,004 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c782dcb9cddcab95f8c562ac4eee43c/B of 2c782dcb9cddcab95f8c562ac4eee43c into ced9de796ade4769bdd77067413bf73e(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:21:40,004 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:40,004 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c., storeName=2c782dcb9cddcab95f8c562ac4eee43c/B, priority=13, startTime=1732101699968; duration=0sec 2024-11-20T11:21:40,004 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:40,004 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c782dcb9cddcab95f8c562ac4eee43c:B 2024-11-20T11:21:40,005 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:40,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742322_1498 (size=4469) 2024-11-20T11:21:40,006 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:40,006 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 2c782dcb9cddcab95f8c562ac4eee43c/C is initiating minor compaction (all files) 2024-11-20T11:21:40,006 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c782dcb9cddcab95f8c562ac4eee43c/C in TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
2024-11-20T11:21:40,007 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/d40935d20da44ebb8ab4303d47d3ea2a, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/bcbf4686a7e94ed0a8f61ab61451aa90, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/67b6706c38a04fce8f330d2e08430fc3] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp, totalSize=35.2 K 2024-11-20T11:21:40,007 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting d40935d20da44ebb8ab4303d47d3ea2a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1732101697256 2024-11-20T11:21:40,007 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting bcbf4686a7e94ed0a8f61ab61451aa90, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732101697271 2024-11-20T11:21:40,008 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 67b6706c38a04fce8f330d2e08430fc3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1732101697588 2024-11-20T11:21:40,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742323_1499 (size=30955) 2024-11-20T11:21:40,009 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/d14bee3fb2bc41bab5f6e975ce661402 2024-11-20T11:21:40,022 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c782dcb9cddcab95f8c562ac4eee43c#C#compaction#423 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:40,022 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/3bd90095352240c1b031f237564ee5c9 is 50, key is test_row_0/C:col10/1732101697588/Put/seqid=0 2024-11-20T11:21:40,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/cc1e6299b70a4a18bd7ad3249ea549c4 is 50, key is test_row_0/B:col10/1732101698715/Put/seqid=0 2024-11-20T11:21:40,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742325_1501 (size=12001) 2024-11-20T11:21:40,038 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/cc1e6299b70a4a18bd7ad3249ea549c4 2024-11-20T11:21:40,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742324_1500 (size=12104) 2024-11-20T11:21:40,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/6b72eaa0017e4a519745cff19518a691 is 50, key is test_row_0/C:col10/1732101698715/Put/seqid=0 2024-11-20T11:21:40,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742326_1502 (size=12001) 2024-11-20T11:21:40,407 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c782dcb9cddcab95f8c562ac4eee43c#A#compaction#420 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:40,408 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/a50d616f9ce04e1e9d3c1f03aebc41d3 is 175, key is test_row_0/A:col10/1732101697588/Put/seqid=0 2024-11-20T11:21:40,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742327_1503 (size=31058) 2024-11-20T11:21:40,416 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/a50d616f9ce04e1e9d3c1f03aebc41d3 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/a50d616f9ce04e1e9d3c1f03aebc41d3 2024-11-20T11:21:40,420 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c782dcb9cddcab95f8c562ac4eee43c/A of 2c782dcb9cddcab95f8c562ac4eee43c into a50d616f9ce04e1e9d3c1f03aebc41d3(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:21:40,420 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:40,420 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c., storeName=2c782dcb9cddcab95f8c562ac4eee43c/A, priority=13, startTime=1732101699968; duration=0sec 2024-11-20T11:21:40,421 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:40,421 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c782dcb9cddcab95f8c562ac4eee43c:A 2024-11-20T11:21:40,446 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/3bd90095352240c1b031f237564ee5c9 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/3bd90095352240c1b031f237564ee5c9 2024-11-20T11:21:40,450 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c782dcb9cddcab95f8c562ac4eee43c/C of 2c782dcb9cddcab95f8c562ac4eee43c into 3bd90095352240c1b031f237564ee5c9(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:21:40,450 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:40,450 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c., storeName=2c782dcb9cddcab95f8c562ac4eee43c/C, priority=13, startTime=1732101699968; duration=0sec 2024-11-20T11:21:40,450 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:40,450 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c782dcb9cddcab95f8c562ac4eee43c:C 2024-11-20T11:21:40,459 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/6b72eaa0017e4a519745cff19518a691 2024-11-20T11:21:40,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/d14bee3fb2bc41bab5f6e975ce661402 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/d14bee3fb2bc41bab5f6e975ce661402 2024-11-20T11:21:40,465 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/d14bee3fb2bc41bab5f6e975ce661402, entries=150, sequenceid=78, filesize=30.2 K 2024-11-20T11:21:40,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/cc1e6299b70a4a18bd7ad3249ea549c4 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/cc1e6299b70a4a18bd7ad3249ea549c4 2024-11-20T11:21:40,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T11:21:40,469 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/cc1e6299b70a4a18bd7ad3249ea549c4, entries=150, sequenceid=78, filesize=11.7 K 2024-11-20T11:21:40,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/6b72eaa0017e4a519745cff19518a691 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/6b72eaa0017e4a519745cff19518a691 2024-11-20T11:21:40,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,473 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/6b72eaa0017e4a519745cff19518a691, entries=150, sequenceid=78, filesize=11.7 K 2024-11-20T11:21:40,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,474 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=0 B/0 for 2c782dcb9cddcab95f8c562ac4eee43c in 497ms, sequenceid=78, compaction requested=false 2024-11-20T11:21:40,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:40,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:40,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-11-20T11:21:40,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-11-20T11:21:40,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,476 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,476 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-11-20T11:21:40,477 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1090 sec 2024-11-20T11:21:40,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,478 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 1.1130 sec 2024-11-20T11:21:40,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the same DEBUG record — storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker — repeats continuously across RpcServer.default.FPBQ.Fifo handlers 0-2 (queue=0, port=35185) from 2024-11-20T11:21:40,536 through 2024-11-20T11:21:40,585; duplicate entries collapsed ...]
2024-11-20T11:21:40,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
[... identical DEBUG records from storefiletracker.StoreFileTrackerFactory(122) — "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" — repeat continuously on RpcServer.default.FPBQ.Fifo handlers 0, 1, and 2 (queue=0, port=35185) from 2024-11-20T11:21:40,617 through 2024-11-20T11:21:40,671 ...]
2024-11-20T11:21:40,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T11:21:40,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... identical DEBUG record repeated by RpcServer.default.FPBQ.Fifo handlers 0-2 between 2024-11-20T11:21:40,791 and 2024-11-20T11:21:40,849 ...]
2024-11-20T11:21:40,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2c782dcb9cddcab95f8c562ac4eee43c 
2024-11-20T11:21:40,865 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2c782dcb9cddcab95f8c562ac4eee43c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T11:21:40,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=A 2024-11-20T11:21:40,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:40,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=B 2024-11-20T11:21:40,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:40,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=C 2024-11-20T11:21:40,866 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:40,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,874 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c2a778c6f5614e389dc19d6697b7c957_2c782dcb9cddcab95f8c562ac4eee43c is 50, key is test_row_0/A:col10/1732101700864/Put/seqid=0 2024-11-20T11:21:40,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,879 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,894 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:40,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101760890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:40,896 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:40,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101760893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:40,896 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:40,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101760893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:40,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:40,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101760894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:40,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:40,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101760894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:40,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742329_1505 (size=24358) 2024-11-20T11:21:40,899 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:40,904 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c2a778c6f5614e389dc19d6697b7c957_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c2a778c6f5614e389dc19d6697b7c957_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:40,904 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/1b609dcbacf14d30805fa924b8321a65, store: [table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:40,905 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/1b609dcbacf14d30805fa924b8321a65 is 175, key is test_row_0/A:col10/1732101700864/Put/seqid=0 2024-11-20T11:21:40,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742328_1504 (size=73994) 2024-11-20T11:21:40,997 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:40,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101760995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:41,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:41,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101760998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:41,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:41,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101760998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:41,000 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:41,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101760998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:41,004 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:41,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101761002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:41,199 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:41,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101761199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:41,202 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:41,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101761201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:41,203 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:41,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101761201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:41,203 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:41,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101761202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:41,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:41,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101761205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:41,310 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=92, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/1b609dcbacf14d30805fa924b8321a65 2024-11-20T11:21:41,317 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/df82aab3592741f7a91e35cece687232 is 50, key is test_row_0/B:col10/1732101700864/Put/seqid=0 2024-11-20T11:21:41,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742330_1506 (size=12001) 2024-11-20T11:21:41,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T11:21:41,469 INFO [Thread-2175 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-11-20T11:21:41,470 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:21:41,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees 2024-11-20T11:21:41,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T11:21:41,472 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:21:41,472 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:21:41,473 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-11-20T11:21:41,501 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:41,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101761500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:41,505 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:41,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101761503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:41,506 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:41,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101761504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:41,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:41,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101761505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:41,510 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:41,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101761508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:41,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T11:21:41,624 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:41,625 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T11:21:41,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:41,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:41,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:41,625 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:41,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:41,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:41,721 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/df82aab3592741f7a91e35cece687232 2024-11-20T11:21:41,727 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/1f14a77624474c7d901e34f441cb4338 is 50, key is test_row_0/C:col10/1732101700864/Put/seqid=0 2024-11-20T11:21:41,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742331_1507 (size=12001) 2024-11-20T11:21:41,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T11:21:41,777 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:41,777 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T11:21:41,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:41,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:41,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:41,778 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:41,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:41,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:41,930 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:41,930 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T11:21:41,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:41,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:41,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:41,930 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:41,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:41,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:42,007 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:42,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101762005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:42,009 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:42,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101762007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:42,009 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:42,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101762008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:42,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:42,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101762010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:42,016 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:42,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101762015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:42,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T11:21:42,082 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:42,083 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T11:21:42,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:42,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:42,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:42,083 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:42,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:42,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:42,130 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/1f14a77624474c7d901e34f441cb4338 2024-11-20T11:21:42,134 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/1b609dcbacf14d30805fa924b8321a65 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/1b609dcbacf14d30805fa924b8321a65 2024-11-20T11:21:42,138 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/1b609dcbacf14d30805fa924b8321a65, entries=400, sequenceid=92, filesize=72.3 K 2024-11-20T11:21:42,138 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/df82aab3592741f7a91e35cece687232 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/df82aab3592741f7a91e35cece687232 2024-11-20T11:21:42,141 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/df82aab3592741f7a91e35cece687232, entries=150, sequenceid=92, filesize=11.7 K 2024-11-20T11:21:42,142 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/1f14a77624474c7d901e34f441cb4338 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/1f14a77624474c7d901e34f441cb4338 2024-11-20T11:21:42,145 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/1f14a77624474c7d901e34f441cb4338, entries=150, sequenceid=92, filesize=11.7 K 2024-11-20T11:21:42,146 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 2c782dcb9cddcab95f8c562ac4eee43c in 1280ms, sequenceid=92, compaction requested=true 2024-11-20T11:21:42,146 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:42,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c782dcb9cddcab95f8c562ac4eee43c:A, 
priority=-2147483648, current under compaction store size is 1 2024-11-20T11:21:42,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:42,146 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:42,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c782dcb9cddcab95f8c562ac4eee43c:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:21:42,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:42,146 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:42,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c782dcb9cddcab95f8c562ac4eee43c:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:21:42,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:42,147 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:42,147 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 136007 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:42,147 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 2c782dcb9cddcab95f8c562ac4eee43c/A is initiating minor compaction (all files) 2024-11-20T11:21:42,147 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 2c782dcb9cddcab95f8c562ac4eee43c/B is initiating minor compaction (all files) 2024-11-20T11:21:42,147 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c782dcb9cddcab95f8c562ac4eee43c/B in TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:42,147 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c782dcb9cddcab95f8c562ac4eee43c/A in TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
2024-11-20T11:21:42,147 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/ced9de796ade4769bdd77067413bf73e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/cc1e6299b70a4a18bd7ad3249ea549c4, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/df82aab3592741f7a91e35cece687232] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp, totalSize=35.3 K 2024-11-20T11:21:42,147 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/a50d616f9ce04e1e9d3c1f03aebc41d3, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/d14bee3fb2bc41bab5f6e975ce661402, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/1b609dcbacf14d30805fa924b8321a65] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp, totalSize=132.8 K 2024-11-20T11:21:42,147 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:42,147 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
files: [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/a50d616f9ce04e1e9d3c1f03aebc41d3, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/d14bee3fb2bc41bab5f6e975ce661402, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/1b609dcbacf14d30805fa924b8321a65] 2024-11-20T11:21:42,147 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting ced9de796ade4769bdd77067413bf73e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1732101697588 2024-11-20T11:21:42,148 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting a50d616f9ce04e1e9d3c1f03aebc41d3, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1732101697588 2024-11-20T11:21:42,148 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting cc1e6299b70a4a18bd7ad3249ea549c4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732101698708 2024-11-20T11:21:42,148 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting d14bee3fb2bc41bab5f6e975ce661402, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732101698708 2024-11-20T11:21:42,148 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting df82aab3592741f7a91e35cece687232, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732101700856 2024-11-20T11:21:42,148 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b609dcbacf14d30805fa924b8321a65, keycount=400, bloomtype=ROW, size=72.3 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732101700853 2024-11-20T11:21:42,153 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:42,153 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c782dcb9cddcab95f8c562ac4eee43c#B#compaction#429 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:42,154 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/ae559198f3b34c6d892b0159fd1d2232 is 50, key is test_row_0/B:col10/1732101700864/Put/seqid=0 2024-11-20T11:21:42,155 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112019e9f413ae8f46febb3296ebddee058d_2c782dcb9cddcab95f8c562ac4eee43c store=[table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:42,158 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112019e9f413ae8f46febb3296ebddee058d_2c782dcb9cddcab95f8c562ac4eee43c, store=[table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:42,158 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112019e9f413ae8f46febb3296ebddee058d_2c782dcb9cddcab95f8c562ac4eee43c because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:42,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742332_1508 (size=12207) 2024-11-20T11:21:42,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742333_1509 (size=4469) 2024-11-20T11:21:42,175 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/ae559198f3b34c6d892b0159fd1d2232 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/ae559198f3b34c6d892b0159fd1d2232 2024-11-20T11:21:42,178 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c782dcb9cddcab95f8c562ac4eee43c/B of 2c782dcb9cddcab95f8c562ac4eee43c into ae559198f3b34c6d892b0159fd1d2232(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:21:42,178 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:42,178 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c., storeName=2c782dcb9cddcab95f8c562ac4eee43c/B, priority=13, startTime=1732101702146; duration=0sec 2024-11-20T11:21:42,179 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:42,179 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c782dcb9cddcab95f8c562ac4eee43c:B 2024-11-20T11:21:42,179 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:42,180 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:42,180 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 2c782dcb9cddcab95f8c562ac4eee43c/C is initiating minor compaction (all files) 2024-11-20T11:21:42,180 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c782dcb9cddcab95f8c562ac4eee43c/C in TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:42,180 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/3bd90095352240c1b031f237564ee5c9, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/6b72eaa0017e4a519745cff19518a691, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/1f14a77624474c7d901e34f441cb4338] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp, totalSize=35.3 K 2024-11-20T11:21:42,180 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 3bd90095352240c1b031f237564ee5c9, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1732101697588 2024-11-20T11:21:42,180 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b72eaa0017e4a519745cff19518a691, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732101698708 2024-11-20T11:21:42,181 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f14a77624474c7d901e34f441cb4338, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732101700856 2024-11-20T11:21:42,188 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
2c782dcb9cddcab95f8c562ac4eee43c#C#compaction#431 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:42,189 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/2780df3f37124fa6856ef4966462091b is 50, key is test_row_0/C:col10/1732101700864/Put/seqid=0 2024-11-20T11:21:42,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742334_1510 (size=12207) 2024-11-20T11:21:42,197 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/2780df3f37124fa6856ef4966462091b as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/2780df3f37124fa6856ef4966462091b 2024-11-20T11:21:42,203 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c782dcb9cddcab95f8c562ac4eee43c/C of 2c782dcb9cddcab95f8c562ac4eee43c into 2780df3f37124fa6856ef4966462091b(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:21:42,203 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:42,203 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c., storeName=2c782dcb9cddcab95f8c562ac4eee43c/C, priority=13, startTime=1732101702146; duration=0sec 2024-11-20T11:21:42,203 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:42,203 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c782dcb9cddcab95f8c562ac4eee43c:C 2024-11-20T11:21:42,235 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:42,235 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T11:21:42,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
2024-11-20T11:21:42,236 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2837): Flushing 2c782dcb9cddcab95f8c562ac4eee43c 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T11:21:42,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=A 2024-11-20T11:21:42,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:42,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=B 2024-11-20T11:21:42,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:42,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=C 2024-11-20T11:21:42,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:42,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120dd7c545e423549beb86294922d4cf57c_2c782dcb9cddcab95f8c562ac4eee43c is 50, key is test_row_0/A:col10/1732101700893/Put/seqid=0 2024-11-20T11:21:42,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742335_1511 (size=12154) 2024-11-20T11:21:42,573 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c782dcb9cddcab95f8c562ac4eee43c#A#compaction#430 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:42,574 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/83b5bbb9df76455098a5add69e2bc7ad is 175, key is test_row_0/A:col10/1732101700864/Put/seqid=0 2024-11-20T11:21:42,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T11:21:42,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742336_1512 (size=31161) 2024-11-20T11:21:42,604 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/83b5bbb9df76455098a5add69e2bc7ad as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/83b5bbb9df76455098a5add69e2bc7ad 2024-11-20T11:21:42,609 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c782dcb9cddcab95f8c562ac4eee43c/A of 2c782dcb9cddcab95f8c562ac4eee43c into 83b5bbb9df76455098a5add69e2bc7ad(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:21:42,609 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:42,609 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c., storeName=2c782dcb9cddcab95f8c562ac4eee43c/A, priority=13, startTime=1732101702146; duration=0sec 2024-11-20T11:21:42,609 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:42,609 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c782dcb9cddcab95f8c562ac4eee43c:A 2024-11-20T11:21:42,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:42,652 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120dd7c545e423549beb86294922d4cf57c_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120dd7c545e423549beb86294922d4cf57c_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:42,653 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/8b0ae284ca314fe38c282e6c74daa730, store: [table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:42,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/8b0ae284ca314fe38c282e6c74daa730 is 175, key is test_row_0/A:col10/1732101700893/Put/seqid=0 2024-11-20T11:21:42,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742337_1513 (size=30955) 2024-11-20T11:21:43,012 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:43,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:43,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:43,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:43,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101763045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:43,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101763045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:43,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:43,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101763046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:43,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:43,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101763046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:43,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:43,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101763046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:43,082 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=120, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/8b0ae284ca314fe38c282e6c74daa730 2024-11-20T11:21:43,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/17723e1a018545fcb77afa709a9ad0b3 is 50, key is test_row_0/B:col10/1732101700893/Put/seqid=0 2024-11-20T11:21:43,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742338_1514 (size=12001) 2024-11-20T11:21:43,150 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:43,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101763149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:43,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:43,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101763149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:43,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:43,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101763149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:43,151 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:43,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101763149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:43,151 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:43,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101763149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:43,352 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:43,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101763351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:43,353 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:43,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101763352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:43,354 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:43,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101763352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:43,354 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:43,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101763352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:43,355 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:43,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101763353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:43,502 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/17723e1a018545fcb77afa709a9ad0b3 2024-11-20T11:21:43,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/deb00a200b274f4cb82a37fe86c30dab is 50, key is test_row_0/C:col10/1732101700893/Put/seqid=0 2024-11-20T11:21:43,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742339_1515 (size=12001) 2024-11-20T11:21:43,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T11:21:43,655 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:43,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101763653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:43,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:43,657 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:43,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101763655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:43,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101763656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:43,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:43,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101763656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:43,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:43,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101763657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:43,916 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/deb00a200b274f4cb82a37fe86c30dab 2024-11-20T11:21:43,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/8b0ae284ca314fe38c282e6c74daa730 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/8b0ae284ca314fe38c282e6c74daa730 2024-11-20T11:21:43,928 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/8b0ae284ca314fe38c282e6c74daa730, entries=150, sequenceid=120, filesize=30.2 K 2024-11-20T11:21:43,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/17723e1a018545fcb77afa709a9ad0b3 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/17723e1a018545fcb77afa709a9ad0b3 2024-11-20T11:21:43,933 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/17723e1a018545fcb77afa709a9ad0b3, entries=150, sequenceid=120, filesize=11.7 K 2024-11-20T11:21:43,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/deb00a200b274f4cb82a37fe86c30dab as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/deb00a200b274f4cb82a37fe86c30dab 2024-11-20T11:21:43,943 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/deb00a200b274f4cb82a37fe86c30dab, entries=150, sequenceid=120, filesize=11.7 K 2024-11-20T11:21:43,944 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=60.38 KB/61830 for 2c782dcb9cddcab95f8c562ac4eee43c in 1707ms, sequenceid=120, compaction requested=false 2024-11-20T11:21:43,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2538): Flush status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:43,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:43,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=170 2024-11-20T11:21:43,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=170 2024-11-20T11:21:43,947 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-11-20T11:21:43,947 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4720 sec 2024-11-20T11:21:43,948 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees in 2.4770 sec 2024-11-20T11:21:44,160 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2c782dcb9cddcab95f8c562ac4eee43c 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T11:21:44,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=A 2024-11-20T11:21:44,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:44,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=B 2024-11-20T11:21:44,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:44,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=C 2024-11-20T11:21:44,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-20T11:21:44,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:44,168 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112033bd7b781b7f4fdca8ab5b1292fc6935_2c782dcb9cddcab95f8c562ac4eee43c is 50, key is test_row_0/A:col10/1732101704158/Put/seqid=0 2024-11-20T11:21:44,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742340_1516 (size=12254) 2024-11-20T11:21:44,180 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:44,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101764178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:44,181 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:44,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101764179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:44,181 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:44,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101764179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:44,182 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:44,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101764180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:44,182 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:44,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101764181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:44,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:44,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101764282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:44,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:44,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101764282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:44,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:44,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101764282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:44,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:44,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101764283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:44,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:44,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101764283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:44,485 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:44,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101764484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:44,486 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:44,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101764485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:44,486 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:44,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101764485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:44,486 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:44,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101764486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:44,488 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:44,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101764486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:44,577 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:44,586 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112033bd7b781b7f4fdca8ab5b1292fc6935_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112033bd7b781b7f4fdca8ab5b1292fc6935_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:44,587 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/b0450ed2ecf9468184a499f4e1ba9cd1, store: [table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:44,587 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/b0450ed2ecf9468184a499f4e1ba9cd1 is 175, key is test_row_0/A:col10/1732101704158/Put/seqid=0 2024-11-20T11:21:44,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742341_1517 (size=31055) 2024-11-20T11:21:44,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:44,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101764789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:44,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:44,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101764789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:44,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:44,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101764789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:44,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:44,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101764789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:44,793 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:44,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101764791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:44,992 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=134, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/b0450ed2ecf9468184a499f4e1ba9cd1 2024-11-20T11:21:45,000 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/bc0a20663d494d57957ee69e5c1cae56 is 50, key is test_row_0/B:col10/1732101704158/Put/seqid=0 2024-11-20T11:21:45,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742342_1518 (size=12101) 2024-11-20T11:21:45,007 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/bc0a20663d494d57957ee69e5c1cae56 2024-11-20T11:21:45,023 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/0b9366762bef406986f17c971691ea45 is 50, key is test_row_0/C:col10/1732101704158/Put/seqid=0 2024-11-20T11:21:45,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742343_1519 (size=12101) 2024-11-20T11:21:45,293 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:45,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101765292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:45,295 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:45,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101765294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:45,297 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:45,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101765295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:45,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:45,298 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:45,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101765296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:45,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101765296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:45,434 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/0b9366762bef406986f17c971691ea45 2024-11-20T11:21:45,438 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/b0450ed2ecf9468184a499f4e1ba9cd1 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/b0450ed2ecf9468184a499f4e1ba9cd1 2024-11-20T11:21:45,444 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/b0450ed2ecf9468184a499f4e1ba9cd1, entries=150, sequenceid=134, filesize=30.3 K 2024-11-20T11:21:45,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/bc0a20663d494d57957ee69e5c1cae56 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/bc0a20663d494d57957ee69e5c1cae56 2024-11-20T11:21:45,454 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/bc0a20663d494d57957ee69e5c1cae56, entries=150, sequenceid=134, filesize=11.8 K 2024-11-20T11:21:45,454 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/0b9366762bef406986f17c971691ea45 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/0b9366762bef406986f17c971691ea45 2024-11-20T11:21:45,458 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/0b9366762bef406986f17c971691ea45, entries=150, sequenceid=134, filesize=11.8 K 2024-11-20T11:21:45,458 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 2c782dcb9cddcab95f8c562ac4eee43c in 1299ms, sequenceid=134, compaction requested=true 2024-11-20T11:21:45,458 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:45,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c782dcb9cddcab95f8c562ac4eee43c:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:21:45,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:45,459 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:45,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c782dcb9cddcab95f8c562ac4eee43c:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:21:45,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:45,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c782dcb9cddcab95f8c562ac4eee43c:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:21:45,459 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:45,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:45,459 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93171 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:45,459 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:45,460 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 2c782dcb9cddcab95f8c562ac4eee43c/B is initiating minor compaction (all files) 2024-11-20T11:21:45,460 DEBUG 
[RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 2c782dcb9cddcab95f8c562ac4eee43c/A is initiating minor compaction (all files) 2024-11-20T11:21:45,460 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c782dcb9cddcab95f8c562ac4eee43c/B in TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:45,460 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c782dcb9cddcab95f8c562ac4eee43c/A in TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:45,460 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/83b5bbb9df76455098a5add69e2bc7ad, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/8b0ae284ca314fe38c282e6c74daa730, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/b0450ed2ecf9468184a499f4e1ba9cd1] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp, totalSize=91.0 K 2024-11-20T11:21:45,460 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/ae559198f3b34c6d892b0159fd1d2232, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/17723e1a018545fcb77afa709a9ad0b3, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/bc0a20663d494d57957ee69e5c1cae56] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp, totalSize=35.5 K 2024-11-20T11:21:45,460 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:45,460 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
files: [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/83b5bbb9df76455098a5add69e2bc7ad, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/8b0ae284ca314fe38c282e6c74daa730, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/b0450ed2ecf9468184a499f4e1ba9cd1] 2024-11-20T11:21:45,460 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 83b5bbb9df76455098a5add69e2bc7ad, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732101700856 2024-11-20T11:21:45,460 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting ae559198f3b34c6d892b0159fd1d2232, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732101700856 2024-11-20T11:21:45,461 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b0ae284ca314fe38c282e6c74daa730, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732101700886 2024-11-20T11:21:45,461 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 17723e1a018545fcb77afa709a9ad0b3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732101700886 2024-11-20T11:21:45,461 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting b0450ed2ecf9468184a499f4e1ba9cd1, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732101703016 2024-11-20T11:21:45,461 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting bc0a20663d494d57957ee69e5c1cae56, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732101703016 2024-11-20T11:21:45,472 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:45,474 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c782dcb9cddcab95f8c562ac4eee43c#B#compaction#438 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:45,474 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/3fbf94b9e3084871aab1e059c46877a3 is 50, key is test_row_0/B:col10/1732101704158/Put/seqid=0 2024-11-20T11:21:45,476 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120e125f870dd124f8d8434d9e602786954_2c782dcb9cddcab95f8c562ac4eee43c store=[table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:45,478 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120e125f870dd124f8d8434d9e602786954_2c782dcb9cddcab95f8c562ac4eee43c, store=[table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:45,479 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e125f870dd124f8d8434d9e602786954_2c782dcb9cddcab95f8c562ac4eee43c because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:45,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742344_1520 (size=12409) 2024-11-20T11:21:45,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742345_1521 (size=4469) 2024-11-20T11:21:45,499 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c782dcb9cddcab95f8c562ac4eee43c#A#compaction#439 average throughput is 0.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:45,499 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/3307a9df84c74ac583486648e2f4306a is 175, key is test_row_0/A:col10/1732101704158/Put/seqid=0 2024-11-20T11:21:45,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742346_1522 (size=31363) 2024-11-20T11:21:45,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T11:21:45,577 INFO [Thread-2175 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-11-20T11:21:45,578 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:21:45,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees 2024-11-20T11:21:45,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-20T11:21:45,579 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:21:45,580 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:21:45,580 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:21:45,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-20T11:21:45,731 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:45,732 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-20T11:21:45,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
2024-11-20T11:21:45,732 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing 2c782dcb9cddcab95f8c562ac4eee43c 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T11:21:45,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=A 2024-11-20T11:21:45,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:45,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=B 2024-11-20T11:21:45,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:45,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=C 2024-11-20T11:21:45,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:45,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d33f239fd67e4a27930d35a6bf8e759b_2c782dcb9cddcab95f8c562ac4eee43c is 50, key is test_row_0/A:col10/1732101704178/Put/seqid=0 2024-11-20T11:21:45,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742347_1523 (size=12304) 2024-11-20T11:21:45,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-20T11:21:45,906 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/3fbf94b9e3084871aab1e059c46877a3 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/3fbf94b9e3084871aab1e059c46877a3 2024-11-20T11:21:45,908 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/3307a9df84c74ac583486648e2f4306a as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/3307a9df84c74ac583486648e2f4306a 2024-11-20T11:21:45,910 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c782dcb9cddcab95f8c562ac4eee43c/B of 2c782dcb9cddcab95f8c562ac4eee43c into 
3fbf94b9e3084871aab1e059c46877a3(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:21:45,911 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:45,911 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c., storeName=2c782dcb9cddcab95f8c562ac4eee43c/B, priority=13, startTime=1732101705459; duration=0sec 2024-11-20T11:21:45,911 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:45,911 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c782dcb9cddcab95f8c562ac4eee43c:B 2024-11-20T11:21:45,911 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:45,912 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:45,912 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 2c782dcb9cddcab95f8c562ac4eee43c/C is initiating minor compaction (all files) 2024-11-20T11:21:45,912 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c782dcb9cddcab95f8c562ac4eee43c/C in TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:45,912 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/2780df3f37124fa6856ef4966462091b, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/deb00a200b274f4cb82a37fe86c30dab, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/0b9366762bef406986f17c971691ea45] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp, totalSize=35.5 K 2024-11-20T11:21:45,912 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c782dcb9cddcab95f8c562ac4eee43c/A of 2c782dcb9cddcab95f8c562ac4eee43c into 3307a9df84c74ac583486648e2f4306a(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:21:45,912 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:45,912 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c., storeName=2c782dcb9cddcab95f8c562ac4eee43c/A, priority=13, startTime=1732101705459; duration=0sec 2024-11-20T11:21:45,912 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:45,912 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c782dcb9cddcab95f8c562ac4eee43c:A 2024-11-20T11:21:45,912 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 2780df3f37124fa6856ef4966462091b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732101700856 2024-11-20T11:21:45,913 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting deb00a200b274f4cb82a37fe86c30dab, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732101700886 2024-11-20T11:21:45,913 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b9366762bef406986f17c971691ea45, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732101703016 2024-11-20T11:21:45,918 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c782dcb9cddcab95f8c562ac4eee43c#C#compaction#441 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:45,919 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/b149ce8445c642d0810d9ed2366d6d62 is 50, key is test_row_0/C:col10/1732101704158/Put/seqid=0 2024-11-20T11:21:45,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742348_1524 (size=12409) 2024-11-20T11:21:45,926 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/b149ce8445c642d0810d9ed2366d6d62 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/b149ce8445c642d0810d9ed2366d6d62 2024-11-20T11:21:45,929 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c782dcb9cddcab95f8c562ac4eee43c/C of 2c782dcb9cddcab95f8c562ac4eee43c into b149ce8445c642d0810d9ed2366d6d62(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:21:45,929 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:45,929 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c., storeName=2c782dcb9cddcab95f8c562ac4eee43c/C, priority=13, startTime=1732101705459; duration=0sec 2024-11-20T11:21:45,930 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:45,930 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c782dcb9cddcab95f8c562ac4eee43c:C 2024-11-20T11:21:46,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:46,147 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d33f239fd67e4a27930d35a6bf8e759b_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d33f239fd67e4a27930d35a6bf8e759b_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:46,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/c0406244946741a5ac6b91abd9e38ec0, store: [table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:46,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/c0406244946741a5ac6b91abd9e38ec0 is 175, key is test_row_0/A:col10/1732101704178/Put/seqid=0 2024-11-20T11:21:46,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742349_1525 (size=31105) 2024-11-20T11:21:46,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-20T11:21:46,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:46,300 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
as already flushing 2024-11-20T11:21:46,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:46,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101766306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:46,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:46,310 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:46,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101766307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:46,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:46,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101766307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:46,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101766307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:46,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:46,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101766308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:46,412 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:46,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101766411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:46,412 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:46,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101766411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:46,412 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:46,412 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:46,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101766411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:46,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101766411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:46,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:46,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101766411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:46,553 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=157, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/c0406244946741a5ac6b91abd9e38ec0 2024-11-20T11:21:46,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/5aa421e6f5c64e1e83faeb11d9c03a73 is 50, key is test_row_0/B:col10/1732101704178/Put/seqid=0 2024-11-20T11:21:46,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742350_1526 (size=12151) 2024-11-20T11:21:46,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:46,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101766613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:46,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:46,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101766613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:46,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:46,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101766613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:46,616 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:46,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101766614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:46,616 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:46,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101766615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:46,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-20T11:21:46,918 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:46,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101766917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:46,919 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:46,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101766917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:46,919 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:46,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101766917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:46,919 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:46,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101766918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:46,920 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:46,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101766918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:46,969 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/5aa421e6f5c64e1e83faeb11d9c03a73 2024-11-20T11:21:46,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/d661693c006449f29275b40d3e270aa6 is 50, key is test_row_0/C:col10/1732101704178/Put/seqid=0 2024-11-20T11:21:46,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742351_1527 (size=12151) 2024-11-20T11:21:46,989 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/d661693c006449f29275b40d3e270aa6 2024-11-20T11:21:46,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/c0406244946741a5ac6b91abd9e38ec0 as 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/c0406244946741a5ac6b91abd9e38ec0 2024-11-20T11:21:46,999 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/c0406244946741a5ac6b91abd9e38ec0, entries=150, sequenceid=157, filesize=30.4 K 2024-11-20T11:21:47,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/5aa421e6f5c64e1e83faeb11d9c03a73 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/5aa421e6f5c64e1e83faeb11d9c03a73 2024-11-20T11:21:47,004 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/5aa421e6f5c64e1e83faeb11d9c03a73, entries=150, sequenceid=157, filesize=11.9 K 2024-11-20T11:21:47,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/d661693c006449f29275b40d3e270aa6 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/d661693c006449f29275b40d3e270aa6 2024-11-20T11:21:47,008 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/d661693c006449f29275b40d3e270aa6, entries=150, sequenceid=157, filesize=11.9 K 2024-11-20T11:21:47,009 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 2c782dcb9cddcab95f8c562ac4eee43c in 1277ms, sequenceid=157, compaction requested=false 2024-11-20T11:21:47,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:47,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
2024-11-20T11:21:47,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-11-20T11:21:47,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-11-20T11:21:47,011 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-11-20T11:21:47,011 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4300 sec 2024-11-20T11:21:47,012 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees in 1.4330 sec 2024-11-20T11:21:47,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:47,423 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2c782dcb9cddcab95f8c562ac4eee43c 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T11:21:47,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=A 2024-11-20T11:21:47,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:47,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=B 2024-11-20T11:21:47,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:47,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=C 2024-11-20T11:21:47,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:47,433 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411206a3417395c294824b634e22fca758409_2c782dcb9cddcab95f8c562ac4eee43c is 50, key is test_row_0/A:col10/1732101707422/Put/seqid=0 2024-11-20T11:21:47,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742352_1528 (size=17284) 2024-11-20T11:21:47,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:47,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101767450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:47,466 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:47,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101767451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:47,466 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:47,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101767454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:47,476 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:47,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101767466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:47,477 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:47,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101767466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:47,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:47,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:47,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101767567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:47,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101767567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:47,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:47,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101767567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:47,577 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:47,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101767577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:47,579 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:47,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101767577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:47,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-20T11:21:47,683 INFO [Thread-2175 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-11-20T11:21:47,684 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:21:47,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees 2024-11-20T11:21:47,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T11:21:47,686 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:21:47,686 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:21:47,686 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:21:47,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:47,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101767770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:47,772 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:47,772 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:47,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101767770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:47,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101767770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:47,779 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:47,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101767778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:47,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:47,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101767780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:47,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T11:21:47,837 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:47,838 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T11:21:47,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:47,838 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:47,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:47,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:47,838 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:47,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:47,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:47,843 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411206a3417395c294824b634e22fca758409_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206a3417395c294824b634e22fca758409_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:47,848 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/1aa1c0ed28c148d1b309f2d25be760a4, store: [table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:47,849 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/1aa1c0ed28c148d1b309f2d25be760a4 is 175, key is test_row_0/A:col10/1732101707422/Put/seqid=0 2024-11-20T11:21:47,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742353_1529 (size=48389) 2024-11-20T11:21:47,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T11:21:47,990 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:47,991 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T11:21:47,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:47,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:47,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
2024-11-20T11:21:47,991 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:47,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:47,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:48,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:48,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101768074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:48,078 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:48,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:48,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101768076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:48,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101768076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:48,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:48,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101768081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:48,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:48,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101768083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:48,143 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:48,144 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T11:21:48,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:48,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:48,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:48,144 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:48,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:48,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:48,253 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=176, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/1aa1c0ed28c148d1b309f2d25be760a4 2024-11-20T11:21:48,259 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/682ec78c85e94d16893fb36d9a6c6a06 is 50, key is test_row_0/B:col10/1732101707422/Put/seqid=0 2024-11-20T11:21:48,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742354_1530 (size=12151) 2024-11-20T11:21:48,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T11:21:48,296 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:48,297 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T11:21:48,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:48,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
as already flushing 2024-11-20T11:21:48,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:48,297 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:48,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:48,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:48,449 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:48,450 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T11:21:48,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:48,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:48,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:48,450 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:48,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:48,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:48,581 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:48,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101768580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:48,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:48,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101768581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:48,584 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:48,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101768582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:48,588 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:48,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101768587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:48,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:48,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101768590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:48,602 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:48,602 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T11:21:48,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:48,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:48,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:48,603 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:48,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:48,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:48,664 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/682ec78c85e94d16893fb36d9a6c6a06 2024-11-20T11:21:48,673 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/f50437251bad4d6fab69da4ec7d6a666 is 50, key is test_row_0/C:col10/1732101707422/Put/seqid=0 2024-11-20T11:21:48,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742355_1531 (size=12151) 2024-11-20T11:21:48,755 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:48,755 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T11:21:48,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:48,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
as already flushing 2024-11-20T11:21:48,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:48,755 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:48,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:48,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:48,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T11:21:48,907 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:48,908 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T11:21:48,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:48,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:48,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:48,909 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:48,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:48,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:49,061 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:49,062 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T11:21:49,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:49,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:49,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:49,062 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:49,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:49,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:49,078 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/f50437251bad4d6fab69da4ec7d6a666 2024-11-20T11:21:49,082 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/1aa1c0ed28c148d1b309f2d25be760a4 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/1aa1c0ed28c148d1b309f2d25be760a4 2024-11-20T11:21:49,085 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/1aa1c0ed28c148d1b309f2d25be760a4, entries=250, sequenceid=176, filesize=47.3 K 2024-11-20T11:21:49,086 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/682ec78c85e94d16893fb36d9a6c6a06 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/682ec78c85e94d16893fb36d9a6c6a06 2024-11-20T11:21:49,090 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/682ec78c85e94d16893fb36d9a6c6a06, entries=150, 
sequenceid=176, filesize=11.9 K 2024-11-20T11:21:49,091 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/f50437251bad4d6fab69da4ec7d6a666 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/f50437251bad4d6fab69da4ec7d6a666 2024-11-20T11:21:49,101 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/f50437251bad4d6fab69da4ec7d6a666, entries=150, sequenceid=176, filesize=11.9 K 2024-11-20T11:21:49,102 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 2c782dcb9cddcab95f8c562ac4eee43c in 1679ms, sequenceid=176, compaction requested=true 2024-11-20T11:21:49,102 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:49,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c782dcb9cddcab95f8c562ac4eee43c:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:21:49,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:49,103 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c782dcb9cddcab95f8c562ac4eee43c:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:21:49,103 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:49,103 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:49,103 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c782dcb9cddcab95f8c562ac4eee43c:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:21:49,103 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T11:21:49,103 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:49,104 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:49,104 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:49,104 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] 
regionserver.HStore(1540): 2c782dcb9cddcab95f8c562ac4eee43c/B is initiating minor compaction (all files) 2024-11-20T11:21:49,104 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 2c782dcb9cddcab95f8c562ac4eee43c/A is initiating minor compaction (all files) 2024-11-20T11:21:49,104 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c782dcb9cddcab95f8c562ac4eee43c/A in TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:49,104 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c782dcb9cddcab95f8c562ac4eee43c/B in TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:49,104 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/3fbf94b9e3084871aab1e059c46877a3, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/5aa421e6f5c64e1e83faeb11d9c03a73, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/682ec78c85e94d16893fb36d9a6c6a06] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp, totalSize=35.9 K 2024-11-20T11:21:49,104 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/3307a9df84c74ac583486648e2f4306a, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/c0406244946741a5ac6b91abd9e38ec0, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/1aa1c0ed28c148d1b309f2d25be760a4] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp, totalSize=108.3 K 2024-11-20T11:21:49,104 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:49,104 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
files: [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/3307a9df84c74ac583486648e2f4306a, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/c0406244946741a5ac6b91abd9e38ec0, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/1aa1c0ed28c148d1b309f2d25be760a4] 2024-11-20T11:21:49,105 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 3fbf94b9e3084871aab1e059c46877a3, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732101703016 2024-11-20T11:21:49,105 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3307a9df84c74ac583486648e2f4306a, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732101703016 2024-11-20T11:21:49,106 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting c0406244946741a5ac6b91abd9e38ec0, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732101704176 2024-11-20T11:21:49,106 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 5aa421e6f5c64e1e83faeb11d9c03a73, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732101704176 2024-11-20T11:21:49,107 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 682ec78c85e94d16893fb36d9a6c6a06, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1732101706306 2024-11-20T11:21:49,107 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1aa1c0ed28c148d1b309f2d25be760a4, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1732101706303 2024-11-20T11:21:49,125 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:49,125 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c782dcb9cddcab95f8c562ac4eee43c#B#compaction#447 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:49,126 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/0b92bc25f9c64ecba0509b1ce5536c1b is 50, key is test_row_0/B:col10/1732101707422/Put/seqid=0 2024-11-20T11:21:49,129 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411203c43a8f7c82a40baafc790dcca6f2cb3_2c782dcb9cddcab95f8c562ac4eee43c store=[table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:49,130 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411203c43a8f7c82a40baafc790dcca6f2cb3_2c782dcb9cddcab95f8c562ac4eee43c, store=[table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:49,130 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203c43a8f7c82a40baafc790dcca6f2cb3_2c782dcb9cddcab95f8c562ac4eee43c because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:49,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742356_1532 (size=12561) 2024-11-20T11:21:49,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742357_1533 (size=4469) 2024-11-20T11:21:49,172 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c782dcb9cddcab95f8c562ac4eee43c#A#compaction#448 average throughput is 0.52 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:49,173 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/0924388a14f441bb899c154e9b184189 is 175, key is test_row_0/A:col10/1732101707422/Put/seqid=0 2024-11-20T11:21:49,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742358_1534 (size=31515) 2024-11-20T11:21:49,214 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:49,214 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T11:21:49,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
2024-11-20T11:21:49,215 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2837): Flushing 2c782dcb9cddcab95f8c562ac4eee43c 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T11:21:49,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=A 2024-11-20T11:21:49,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:49,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=B 2024-11-20T11:21:49,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:49,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=C 2024-11-20T11:21:49,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:49,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204625e8976a77409c9db8c353bad82c18_2c782dcb9cddcab95f8c562ac4eee43c is 50, key is test_row_0/A:col10/1732101707445/Put/seqid=0 2024-11-20T11:21:49,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742359_1535 (size=12304) 2024-11-20T11:21:49,547 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/0b92bc25f9c64ecba0509b1ce5536c1b as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/0b92bc25f9c64ecba0509b1ce5536c1b 2024-11-20T11:21:49,551 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c782dcb9cddcab95f8c562ac4eee43c/B of 2c782dcb9cddcab95f8c562ac4eee43c into 0b92bc25f9c64ecba0509b1ce5536c1b(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:21:49,551 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:49,551 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c., storeName=2c782dcb9cddcab95f8c562ac4eee43c/B, priority=13, startTime=1732101709102; duration=0sec 2024-11-20T11:21:49,551 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:49,551 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c782dcb9cddcab95f8c562ac4eee43c:B 2024-11-20T11:21:49,551 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:49,552 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:49,552 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 2c782dcb9cddcab95f8c562ac4eee43c/C is initiating minor compaction (all files) 2024-11-20T11:21:49,552 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c782dcb9cddcab95f8c562ac4eee43c/C in TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:49,552 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/b149ce8445c642d0810d9ed2366d6d62, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/d661693c006449f29275b40d3e270aa6, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/f50437251bad4d6fab69da4ec7d6a666] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp, totalSize=35.9 K 2024-11-20T11:21:49,553 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting b149ce8445c642d0810d9ed2366d6d62, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1732101703016 2024-11-20T11:21:49,553 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting d661693c006449f29275b40d3e270aa6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732101704176 2024-11-20T11:21:49,553 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting f50437251bad4d6fab69da4ec7d6a666, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1732101706306 2024-11-20T11:21:49,559 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
2c782dcb9cddcab95f8c562ac4eee43c#C#compaction#450 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:49,560 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/051c3cd9a7f34e2296f22d40aa90ed89 is 50, key is test_row_0/C:col10/1732101707422/Put/seqid=0 2024-11-20T11:21:49,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742360_1536 (size=12561) 2024-11-20T11:21:49,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:49,587 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:49,591 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/0924388a14f441bb899c154e9b184189 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/0924388a14f441bb899c154e9b184189 2024-11-20T11:21:49,594 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c782dcb9cddcab95f8c562ac4eee43c/A of 2c782dcb9cddcab95f8c562ac4eee43c into 0924388a14f441bb899c154e9b184189(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:21:49,595 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:49,595 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c., storeName=2c782dcb9cddcab95f8c562ac4eee43c/A, priority=13, startTime=1732101709102; duration=0sec 2024-11-20T11:21:49,595 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:49,595 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c782dcb9cddcab95f8c562ac4eee43c:A 2024-11-20T11:21:49,601 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:49,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101769598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:49,601 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:49,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101769598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:49,602 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:49,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101769599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:49,602 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:49,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101769600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:49,602 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:49,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101769600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:49,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:49,642 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204625e8976a77409c9db8c353bad82c18_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204625e8976a77409c9db8c353bad82c18_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:49,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/77d70493447b433d8bb6699c5ab82f87, store: [table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:49,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/77d70493447b433d8bb6699c5ab82f87 is 175, key is test_row_0/A:col10/1732101707445/Put/seqid=0 2024-11-20T11:21:49,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742361_1537 (size=31105) 2024-11-20T11:21:49,653 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=197, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/77d70493447b433d8bb6699c5ab82f87 2024-11-20T11:21:49,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/fc830ece556448b99632ab56b9a4e161 is 50, key is test_row_0/B:col10/1732101707445/Put/seqid=0 2024-11-20T11:21:49,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742362_1538 (size=12151) 2024-11-20T11:21:49,704 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:49,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:49,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101769703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:49,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101769702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:49,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:49,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101769703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:49,705 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:49,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101769703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:49,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T11:21:49,906 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:49,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101769905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:49,907 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:49,907 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:49,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101769905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:49,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101769905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:49,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:49,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101769906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:49,968 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/051c3cd9a7f34e2296f22d40aa90ed89 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/051c3cd9a7f34e2296f22d40aa90ed89 2024-11-20T11:21:49,976 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c782dcb9cddcab95f8c562ac4eee43c/C of 2c782dcb9cddcab95f8c562ac4eee43c into 051c3cd9a7f34e2296f22d40aa90ed89(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:21:49,976 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:49,976 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c., storeName=2c782dcb9cddcab95f8c562ac4eee43c/C, priority=13, startTime=1732101709103; duration=0sec 2024-11-20T11:21:49,977 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:49,977 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c782dcb9cddcab95f8c562ac4eee43c:C 2024-11-20T11:21:50,075 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/fc830ece556448b99632ab56b9a4e161 2024-11-20T11:21:50,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/3ecfe82a7c9b452ab4ff987f873e15a2 is 50, key is test_row_0/C:col10/1732101707445/Put/seqid=0 2024-11-20T11:21:50,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742363_1539 (size=12151) 2024-11-20T11:21:50,208 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:50,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101770208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:50,211 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:50,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101770210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:50,211 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:50,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101770210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:50,212 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:50,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101770211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:50,489 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/3ecfe82a7c9b452ab4ff987f873e15a2 2024-11-20T11:21:50,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/77d70493447b433d8bb6699c5ab82f87 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/77d70493447b433d8bb6699c5ab82f87 2024-11-20T11:21:50,498 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/77d70493447b433d8bb6699c5ab82f87, entries=150, sequenceid=197, filesize=30.4 K 2024-11-20T11:21:50,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/fc830ece556448b99632ab56b9a4e161 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/fc830ece556448b99632ab56b9a4e161 2024-11-20T11:21:50,508 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/fc830ece556448b99632ab56b9a4e161, entries=150, sequenceid=197, filesize=11.9 K 2024-11-20T11:21:50,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/3ecfe82a7c9b452ab4ff987f873e15a2 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/3ecfe82a7c9b452ab4ff987f873e15a2 2024-11-20T11:21:50,513 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/3ecfe82a7c9b452ab4ff987f873e15a2, entries=150, sequenceid=197, filesize=11.9 K 2024-11-20T11:21:50,514 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 2c782dcb9cddcab95f8c562ac4eee43c in 1300ms, sequenceid=197, compaction requested=false 2024-11-20T11:21:50,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2538): Flush status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:50,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:50,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=174 2024-11-20T11:21:50,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=174 2024-11-20T11:21:50,516 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-11-20T11:21:50,516 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8290 sec 2024-11-20T11:21:50,518 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees in 2.8330 sec 2024-11-20T11:21:50,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:50,743 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2c782dcb9cddcab95f8c562ac4eee43c 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-20T11:21:50,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=A 2024-11-20T11:21:50,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:50,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=B 2024-11-20T11:21:50,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:50,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
2c782dcb9cddcab95f8c562ac4eee43c, store=C 2024-11-20T11:21:50,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:50,752 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c2d68b26ef2d432db5dcecaecd4f5d4a_2c782dcb9cddcab95f8c562ac4eee43c is 50, key is test_row_0/A:col10/1732101710743/Put/seqid=0 2024-11-20T11:21:50,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742364_1540 (size=12304) 2024-11-20T11:21:50,764 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:50,764 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:50,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101770762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:50,765 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:50,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101770762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:50,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:50,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101770763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:50,767 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:50,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101770764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:50,768 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c2d68b26ef2d432db5dcecaecd4f5d4a_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c2d68b26ef2d432db5dcecaecd4f5d4a_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:50,768 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/1e05e2003662454e8583c57314fbc306, store: [table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:50,769 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/1e05e2003662454e8583c57314fbc306 is 175, key is test_row_0/A:col10/1732101710743/Put/seqid=0 2024-11-20T11:21:50,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742365_1541 (size=31105) 2024-11-20T11:21:50,867 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:50,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101770865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:50,867 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:50,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101770866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:50,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:50,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101770867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:50,870 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:50,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101770868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:51,070 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:51,070 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:51,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101771068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:51,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101771068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:51,072 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:51,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101771070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:51,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:51,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101771072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:51,178 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=220, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/1e05e2003662454e8583c57314fbc306 2024-11-20T11:21:51,184 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/1d57a224d53b481fbfb101fe1b68c477 is 50, key is test_row_0/B:col10/1732101710743/Put/seqid=0 2024-11-20T11:21:51,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742366_1542 (size=12151) 2024-11-20T11:21:51,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:51,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101771371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:51,374 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:51,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101771373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:51,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:51,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101771374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:51,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:51,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101771374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:51,590 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=220 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/1d57a224d53b481fbfb101fe1b68c477 2024-11-20T11:21:51,596 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/89d2b3f36e5243c9badb580dae7f8a0a is 50, key is test_row_0/C:col10/1732101710743/Put/seqid=0 2024-11-20T11:21:51,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742367_1543 (size=12151) 2024-11-20T11:21:51,614 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:51,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101771613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:51,615 DEBUG [Thread-2169 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4149 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c., hostname=ee8338ed7cc0,35185,1732101546666, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T11:21:51,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T11:21:51,790 INFO [Thread-2175 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-11-20T11:21:51,791 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:21:51,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, 
state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees 2024-11-20T11:21:51,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-20T11:21:51,793 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:21:51,793 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:21:51,793 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:21:51,874 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:51,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101771873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:51,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:51,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101771876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:51,879 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:51,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101771877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:51,879 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:51,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101771878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:51,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-20T11:21:51,945 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:51,945 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-20T11:21:51,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:51,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:51,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:51,946 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:51,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:51,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:52,000 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=220 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/89d2b3f36e5243c9badb580dae7f8a0a 2024-11-20T11:21:52,005 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/1e05e2003662454e8583c57314fbc306 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/1e05e2003662454e8583c57314fbc306 2024-11-20T11:21:52,008 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/1e05e2003662454e8583c57314fbc306, entries=150, sequenceid=220, filesize=30.4 K 2024-11-20T11:21:52,009 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/1d57a224d53b481fbfb101fe1b68c477 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/1d57a224d53b481fbfb101fe1b68c477 2024-11-20T11:21:52,012 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/1d57a224d53b481fbfb101fe1b68c477, entries=150, 
sequenceid=220, filesize=11.9 K 2024-11-20T11:21:52,013 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/89d2b3f36e5243c9badb580dae7f8a0a as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/89d2b3f36e5243c9badb580dae7f8a0a 2024-11-20T11:21:52,016 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/89d2b3f36e5243c9badb580dae7f8a0a, entries=150, sequenceid=220, filesize=11.9 K 2024-11-20T11:21:52,017 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 2c782dcb9cddcab95f8c562ac4eee43c in 1274ms, sequenceid=220, compaction requested=true 2024-11-20T11:21:52,017 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:52,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c782dcb9cddcab95f8c562ac4eee43c:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:21:52,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:52,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c782dcb9cddcab95f8c562ac4eee43c:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:21:52,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:52,017 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:52,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c782dcb9cddcab95f8c562ac4eee43c:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:21:52,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:52,017 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:52,018 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93725 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:52,018 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:52,018 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] 
regionserver.HStore(1540): 2c782dcb9cddcab95f8c562ac4eee43c/A is initiating minor compaction (all files) 2024-11-20T11:21:52,018 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 2c782dcb9cddcab95f8c562ac4eee43c/B is initiating minor compaction (all files) 2024-11-20T11:21:52,018 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c782dcb9cddcab95f8c562ac4eee43c/A in TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:52,018 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c782dcb9cddcab95f8c562ac4eee43c/B in TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:52,019 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/0924388a14f441bb899c154e9b184189, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/77d70493447b433d8bb6699c5ab82f87, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/1e05e2003662454e8583c57314fbc306] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp, totalSize=91.5 K 2024-11-20T11:21:52,019 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/0b92bc25f9c64ecba0509b1ce5536c1b, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/fc830ece556448b99632ab56b9a4e161, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/1d57a224d53b481fbfb101fe1b68c477] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp, totalSize=36.0 K 2024-11-20T11:21:52,019 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:52,019 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
files: [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/0924388a14f441bb899c154e9b184189, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/77d70493447b433d8bb6699c5ab82f87, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/1e05e2003662454e8583c57314fbc306] 2024-11-20T11:21:52,019 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b92bc25f9c64ecba0509b1ce5536c1b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1732101706306 2024-11-20T11:21:52,019 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0924388a14f441bb899c154e9b184189, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1732101706306 2024-11-20T11:21:52,019 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 77d70493447b433d8bb6699c5ab82f87, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732101707445 2024-11-20T11:21:52,019 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting fc830ece556448b99632ab56b9a4e161, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732101707445 2024-11-20T11:21:52,019 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e05e2003662454e8583c57314fbc306, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732101710715 2024-11-20T11:21:52,020 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d57a224d53b481fbfb101fe1b68c477, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732101710715 2024-11-20T11:21:52,025 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:52,026 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c782dcb9cddcab95f8c562ac4eee43c#B#compaction#456 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:52,027 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/1fdbe5a4a71e414f97d8483849527e96 is 50, key is test_row_0/B:col10/1732101710743/Put/seqid=0 2024-11-20T11:21:52,028 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120c4879979c9504d34af5a6e8338cea57f_2c782dcb9cddcab95f8c562ac4eee43c store=[table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:52,030 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120c4879979c9504d34af5a6e8338cea57f_2c782dcb9cddcab95f8c562ac4eee43c, store=[table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:52,030 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c4879979c9504d34af5a6e8338cea57f_2c782dcb9cddcab95f8c562ac4eee43c because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:52,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742368_1544 (size=12663) 2024-11-20T11:21:52,067 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/1fdbe5a4a71e414f97d8483849527e96 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/1fdbe5a4a71e414f97d8483849527e96 2024-11-20T11:21:52,072 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c782dcb9cddcab95f8c562ac4eee43c/B of 2c782dcb9cddcab95f8c562ac4eee43c into 1fdbe5a4a71e414f97d8483849527e96(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:21:52,072 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:52,072 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c., storeName=2c782dcb9cddcab95f8c562ac4eee43c/B, priority=13, startTime=1732101712017; duration=0sec 2024-11-20T11:21:52,072 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:52,072 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c782dcb9cddcab95f8c562ac4eee43c:B 2024-11-20T11:21:52,072 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:52,074 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:52,074 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 2c782dcb9cddcab95f8c562ac4eee43c/C is initiating minor compaction (all files) 2024-11-20T11:21:52,074 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c782dcb9cddcab95f8c562ac4eee43c/C in TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:52,074 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/051c3cd9a7f34e2296f22d40aa90ed89, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/3ecfe82a7c9b452ab4ff987f873e15a2, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/89d2b3f36e5243c9badb580dae7f8a0a] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp, totalSize=36.0 K 2024-11-20T11:21:52,075 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 051c3cd9a7f34e2296f22d40aa90ed89, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1732101706306 2024-11-20T11:21:52,075 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ecfe82a7c9b452ab4ff987f873e15a2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732101707445 2024-11-20T11:21:52,075 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 89d2b3f36e5243c9badb580dae7f8a0a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732101710715 2024-11-20T11:21:52,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 
is added to blk_1073742369_1545 (size=4469) 2024-11-20T11:21:52,078 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c782dcb9cddcab95f8c562ac4eee43c#A#compaction#457 average throughput is 0.46 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:52,079 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/b1e254b116364a619b57beab440d5b9e is 175, key is test_row_0/A:col10/1732101710743/Put/seqid=0 2024-11-20T11:21:52,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742370_1546 (size=31617) 2024-11-20T11:21:52,085 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c782dcb9cddcab95f8c562ac4eee43c#C#compaction#458 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:52,086 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/ea726b822597415082cbdde2cde0cd9e is 50, key is test_row_0/C:col10/1732101710743/Put/seqid=0 2024-11-20T11:21:52,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742371_1547 (size=12663) 2024-11-20T11:21:52,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-20T11:21:52,098 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:52,098 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-20T11:21:52,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
2024-11-20T11:21:52,098 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2837): Flushing 2c782dcb9cddcab95f8c562ac4eee43c 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T11:21:52,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=A 2024-11-20T11:21:52,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:52,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=B 2024-11-20T11:21:52,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:52,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=C 2024-11-20T11:21:52,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:52,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120833ddc9cbfdc43a2bd554c1717b3d82b_2c782dcb9cddcab95f8c562ac4eee43c is 50, key is test_row_0/A:col10/1732101710761/Put/seqid=0 2024-11-20T11:21:52,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742372_1548 (size=12304) 2024-11-20T11:21:52,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,114 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120833ddc9cbfdc43a2bd554c1717b3d82b_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120833ddc9cbfdc43a2bd554c1717b3d82b_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:52,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/596fb69be4174aa989a76bac13d86175, store: [table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:52,116 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/596fb69be4174aa989a76bac13d86175 is 175, key is test_row_0/A:col10/1732101710761/Put/seqid=0 2024-11-20T11:21:52,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742373_1549 (size=31105) 2024-11-20T11:21:52,119 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=237, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/596fb69be4174aa989a76bac13d86175 2024-11-20T11:21:52,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/1028f87098bc4442a31b1734dfa460ef is 50, key is test_row_0/B:col10/1732101710761/Put/seqid=0 2024-11-20T11:21:52,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742374_1550 (size=12151) 2024-11-20T11:21:52,129 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/1028f87098bc4442a31b1734dfa460ef 2024-11-20T11:21:52,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/0aa3eebbeb794e63ba81cec349bf8674 is 50, key is test_row_0/C:col10/1732101710761/Put/seqid=0 2024-11-20T11:21:52,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742375_1551 (size=12151) 2024-11-20T11:21:52,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-20T11:21:52,488 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/b1e254b116364a619b57beab440d5b9e as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/b1e254b116364a619b57beab440d5b9e 2024-11-20T11:21:52,492 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c782dcb9cddcab95f8c562ac4eee43c/A of 2c782dcb9cddcab95f8c562ac4eee43c into 
b1e254b116364a619b57beab440d5b9e(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:21:52,492 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:52,492 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c., storeName=2c782dcb9cddcab95f8c562ac4eee43c/A, priority=13, startTime=1732101712017; duration=0sec 2024-11-20T11:21:52,492 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:52,492 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c782dcb9cddcab95f8c562ac4eee43c:A 2024-11-20T11:21:52,494 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/ea726b822597415082cbdde2cde0cd9e as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/ea726b822597415082cbdde2cde0cd9e 2024-11-20T11:21:52,497 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c782dcb9cddcab95f8c562ac4eee43c/C of 2c782dcb9cddcab95f8c562ac4eee43c into ea726b822597415082cbdde2cde0cd9e(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:21:52,497 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c782dcb9cddcab95f8c562ac4eee43c:
2024-11-20T11:21:52,497 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c., storeName=2c782dcb9cddcab95f8c562ac4eee43c/C, priority=13, startTime=1732101712017; duration=0sec
2024-11-20T11:21:52,497 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T11:21:52,497 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c782dcb9cddcab95f8c562ac4eee43c:C
2024-11-20T11:21:52,541 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/0aa3eebbeb794e63ba81cec349bf8674
2024-11-20T11:21:52,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/596fb69be4174aa989a76bac13d86175 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/596fb69be4174aa989a76bac13d86175
2024-11-20T11:21:52,548 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/596fb69be4174aa989a76bac13d86175, entries=150, sequenceid=237, filesize=30.4 K
2024-11-20T11:21:52,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/1028f87098bc4442a31b1734dfa460ef as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/1028f87098bc4442a31b1734dfa460ef
2024-11-20T11:21:52,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[The preceding StoreFileTrackerFactory DEBUG message repeats continuously from RpcServer.default.FPBQ.Fifo handlers 0, 1, and 2 (port 35185) between 11:21:52,550 and 11:21:52,606, interleaved with the entries below; the duplicate occurrences are collapsed here.]
2024-11-20T11:21:52,552 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/1028f87098bc4442a31b1734dfa460ef, entries=150, sequenceid=237, filesize=11.9 K
2024-11-20T11:21:52,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/0aa3eebbeb794e63ba81cec349bf8674 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/0aa3eebbeb794e63ba81cec349bf8674
2024-11-20T11:21:52,556 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/0aa3eebbeb794e63ba81cec349bf8674, entries=150, sequenceid=237, filesize=11.9 K
2024-11-20T11:21:52,557 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=0 B/0 for 2c782dcb9cddcab95f8c562ac4eee43c in 459ms, sequenceid=237, compaction requested=false
2024-11-20T11:21:52,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2538): Flush status journal for 2c782dcb9cddcab95f8c562ac4eee43c:
2024-11-20T11:21:52,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.
2024-11-20T11:21:52,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176
2024-11-20T11:21:52,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=176
2024-11-20T11:21:52,561 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=175
2024-11-20T11:21:52,561 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 765 msec
2024-11-20T11:21:52,564 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees in 770 msec
2024-11-20T11:21:52,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... identical DEBUG entry repeated continuously by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (queue=0, port=35185) from 2024-11-20T11:21:52,649 through 2024-11-20T11:21:52,722 ...]
2024-11-20T11:21:52,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-20T11:21:52,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,896 INFO [Thread-2175 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 175 completed 2024-11-20T11:21:52,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,897 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:21:52,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees 2024-11-20T11:21:52,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-20T11:21:52,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,899 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:21:52,899 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:21:52,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,899 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:21:52,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:52,916 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2c782dcb9cddcab95f8c562ac4eee43c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T11:21:52,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=A 2024-11-20T11:21:52,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:52,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=B 2024-11-20T11:21:52,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:52,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=C 2024-11-20T11:21:52,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline 
suffix; before=1, new segment=null 2024-11-20T11:21:52,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:52,942 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120eb2025115fc04653b53f2d4c29ad4797_2c782dcb9cddcab95f8c562ac4eee43c is 50, key is test_row_0/A:col10/1732101712907/Put/seqid=0 2024-11-20T11:21:52,956 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T11:21:52,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101772951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666
2024-11-20T11:21:52,956 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T11:21:52,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101772953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666
2024-11-20T11:21:52,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T11:21:52,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101772953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666
2024-11-20T11:21:52,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T11:21:52,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101772956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666
2024-11-20T11:21:52,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177
2024-11-20T11:21:53,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742377_1553 (size=24758)
2024-11-20T11:21:53,052 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666
2024-11-20T11:21:53,052 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178
2024-11-20T11:21:53,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.
2024-11-20T11:21:53,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing
2024-11-20T11:21:53,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.
2024-11-20T11:21:53,053 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178
java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T11:21:53,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178
java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T11:21:53,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=178
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
	at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
	at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
	at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T11:21:53,059 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T11:21:53,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101773058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666
2024-11-20T11:21:53,061 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T11:21:53,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101773059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666
2024-11-20T11:21:53,062 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:53,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101773060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:53,062 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:53,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101773061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:53,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-20T11:21:53,206 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:53,206 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T11:21:53,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:53,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:53,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:53,207 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:53,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:53,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:53,265 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:53,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101773263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:53,265 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:53,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101773263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:53,266 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:53,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101773263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:53,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:53,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101773265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:53,359 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:53,359 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T11:21:53,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:53,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:53,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:53,360 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:53,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:53,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:53,407 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:53,414 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120eb2025115fc04653b53f2d4c29ad4797_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120eb2025115fc04653b53f2d4c29ad4797_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:53,415 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/d699ff2928c549d7a6ac6fe6e97b0f4e, store: [table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:53,419 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/d699ff2928c549d7a6ac6fe6e97b0f4e is 175, key is test_row_0/A:col10/1732101712907/Put/seqid=0 2024-11-20T11:21:53,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742376_1552 (size=74395) 2024-11-20T11:21:53,425 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=250, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/d699ff2928c549d7a6ac6fe6e97b0f4e 2024-11-20T11:21:53,466 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/aa60acc3bfc84136aaa8bc34ee972d47 is 50, key is test_row_0/B:col10/1732101712907/Put/seqid=0 2024-11-20T11:21:53,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-20T11:21:53,513 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:53,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38643 is added to blk_1073742378_1554 (size=12151) 2024-11-20T11:21:53,513 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T11:21:53,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:53,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:53,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:53,514 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/aa60acc3bfc84136aaa8bc34ee972d47 2024-11-20T11:21:53,514 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:53,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:53,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:53,525 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/b6bbbd83c6844ac1ba062e21318c68bc is 50, key is test_row_0/C:col10/1732101712907/Put/seqid=0 2024-11-20T11:21:53,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:53,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101773568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:53,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:53,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101773568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:53,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:53,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101773568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:53,572 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:53,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101773569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:53,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742379_1555 (size=12151) 2024-11-20T11:21:53,667 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:53,668 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T11:21:53,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:53,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:53,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:53,668 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:53,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:53,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:53,820 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:53,821 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T11:21:53,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:53,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:53,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:53,822 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:53,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:53,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:53,975 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:53,975 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T11:21:53,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:53,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:53,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:53,976 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:53,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:53,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:53,982 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/b6bbbd83c6844ac1ba062e21318c68bc 2024-11-20T11:21:53,987 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/d699ff2928c549d7a6ac6fe6e97b0f4e as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/d699ff2928c549d7a6ac6fe6e97b0f4e 2024-11-20T11:21:53,990 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/d699ff2928c549d7a6ac6fe6e97b0f4e, entries=400, sequenceid=250, filesize=72.7 K 2024-11-20T11:21:53,991 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/aa60acc3bfc84136aaa8bc34ee972d47 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/aa60acc3bfc84136aaa8bc34ee972d47 2024-11-20T11:21:53,994 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/aa60acc3bfc84136aaa8bc34ee972d47, entries=150, sequenceid=250, filesize=11.9 K 2024-11-20T11:21:53,995 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/b6bbbd83c6844ac1ba062e21318c68bc as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/b6bbbd83c6844ac1ba062e21318c68bc 2024-11-20T11:21:53,998 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/b6bbbd83c6844ac1ba062e21318c68bc, entries=150, sequenceid=250, filesize=11.9 K 2024-11-20T11:21:54,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-20T11:21:54,004 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 2c782dcb9cddcab95f8c562ac4eee43c in 1088ms, sequenceid=250, compaction requested=true 2024-11-20T11:21:54,004 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:54,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c782dcb9cddcab95f8c562ac4eee43c:A, priority=-2147483648, current under compaction store 
size is 1 2024-11-20T11:21:54,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:54,004 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:54,004 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:54,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c782dcb9cddcab95f8c562ac4eee43c:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:21:54,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:54,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c782dcb9cddcab95f8c562ac4eee43c:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:21:54,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:54,005 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 137117 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:54,005 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 2c782dcb9cddcab95f8c562ac4eee43c/A is initiating minor compaction (all files) 2024-11-20T11:21:54,005 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c782dcb9cddcab95f8c562ac4eee43c/A in TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:54,005 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/b1e254b116364a619b57beab440d5b9e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/596fb69be4174aa989a76bac13d86175, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/d699ff2928c549d7a6ac6fe6e97b0f4e] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp, totalSize=133.9 K 2024-11-20T11:21:54,005 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
2024-11-20T11:21:54,005 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. files: [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/b1e254b116364a619b57beab440d5b9e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/596fb69be4174aa989a76bac13d86175, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/d699ff2928c549d7a6ac6fe6e97b0f4e] 2024-11-20T11:21:54,006 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:54,006 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 2c782dcb9cddcab95f8c562ac4eee43c/B is initiating minor compaction (all files) 2024-11-20T11:21:54,006 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c782dcb9cddcab95f8c562ac4eee43c/B in TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:54,006 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/1fdbe5a4a71e414f97d8483849527e96, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/1028f87098bc4442a31b1734dfa460ef, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/aa60acc3bfc84136aaa8bc34ee972d47] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp, totalSize=36.1 K 2024-11-20T11:21:54,006 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting b1e254b116364a619b57beab440d5b9e, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732101710715 2024-11-20T11:21:54,006 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 1fdbe5a4a71e414f97d8483849527e96, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732101710715 2024-11-20T11:21:54,007 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 1028f87098bc4442a31b1734dfa460ef, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732101710760 2024-11-20T11:21:54,007 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 596fb69be4174aa989a76bac13d86175, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732101710760 2024-11-20T11:21:54,007 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting aa60acc3bfc84136aaa8bc34ee972d47, 
keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732101712907 2024-11-20T11:21:54,007 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting d699ff2928c549d7a6ac6fe6e97b0f4e, keycount=400, bloomtype=ROW, size=72.7 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732101712903 2024-11-20T11:21:54,022 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c782dcb9cddcab95f8c562ac4eee43c#B#compaction#465 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:54,023 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/c38e18b15923413cac710906987a034e is 50, key is test_row_0/B:col10/1732101712907/Put/seqid=0 2024-11-20T11:21:54,024 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:54,032 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120cb84b321836c4b32b507a09af2270eb0_2c782dcb9cddcab95f8c562ac4eee43c store=[table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:54,036 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120cb84b321836c4b32b507a09af2270eb0_2c782dcb9cddcab95f8c562ac4eee43c, store=[table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:54,036 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cb84b321836c4b32b507a09af2270eb0_2c782dcb9cddcab95f8c562ac4eee43c because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:54,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742380_1556 (size=12765) 2024-11-20T11:21:54,042 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/c38e18b15923413cac710906987a034e as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/c38e18b15923413cac710906987a034e 2024-11-20T11:21:54,046 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c782dcb9cddcab95f8c562ac4eee43c/B of 2c782dcb9cddcab95f8c562ac4eee43c into c38e18b15923413cac710906987a034e(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:21:54,046 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:54,046 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c., storeName=2c782dcb9cddcab95f8c562ac4eee43c/B, priority=13, startTime=1732101714004; duration=0sec 2024-11-20T11:21:54,046 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:54,046 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c782dcb9cddcab95f8c562ac4eee43c:B 2024-11-20T11:21:54,047 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T11:21:54,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742381_1557 (size=4469) 2024-11-20T11:21:54,048 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T11:21:54,048 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 2c782dcb9cddcab95f8c562ac4eee43c/C is initiating minor compaction (all files) 2024-11-20T11:21:54,048 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c782dcb9cddcab95f8c562ac4eee43c/C in TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:54,049 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/ea726b822597415082cbdde2cde0cd9e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/0aa3eebbeb794e63ba81cec349bf8674, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/b6bbbd83c6844ac1ba062e21318c68bc] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp, totalSize=36.1 K 2024-11-20T11:21:54,049 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting ea726b822597415082cbdde2cde0cd9e, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732101710715 2024-11-20T11:21:54,049 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c782dcb9cddcab95f8c562ac4eee43c#A#compaction#466 average throughput is 0.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:54,050 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/c7506490641b44c299fe934404a8134c is 175, key is test_row_0/A:col10/1732101712907/Put/seqid=0 2024-11-20T11:21:54,050 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 0aa3eebbeb794e63ba81cec349bf8674, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732101710760 2024-11-20T11:21:54,051 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting b6bbbd83c6844ac1ba062e21318c68bc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732101712907 2024-11-20T11:21:54,059 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c782dcb9cddcab95f8c562ac4eee43c#C#compaction#467 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:54,059 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/6b9258a9c7ca40148db3dbb285f0b7a6 is 50, key is test_row_0/C:col10/1732101712907/Put/seqid=0 2024-11-20T11:21:54,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742382_1558 (size=31719) 2024-11-20T11:21:54,065 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/c7506490641b44c299fe934404a8134c as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/c7506490641b44c299fe934404a8134c 2024-11-20T11:21:54,071 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c782dcb9cddcab95f8c562ac4eee43c/A of 2c782dcb9cddcab95f8c562ac4eee43c into c7506490641b44c299fe934404a8134c(size=31.0 K), total size for store is 31.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:21:54,071 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:54,071 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c., storeName=2c782dcb9cddcab95f8c562ac4eee43c/A, priority=13, startTime=1732101714004; duration=0sec 2024-11-20T11:21:54,071 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:54,071 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c782dcb9cddcab95f8c562ac4eee43c:A 2024-11-20T11:21:54,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742383_1559 (size=12765) 2024-11-20T11:21:54,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:54,077 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2c782dcb9cddcab95f8c562ac4eee43c 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T11:21:54,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=A 2024-11-20T11:21:54,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:54,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=B 2024-11-20T11:21:54,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:54,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=C 2024-11-20T11:21:54,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:54,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:54,084 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:54,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101774081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:54,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101774082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:54,085 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112045e43f9c73434544bfa8037fdf2ada69_2c782dcb9cddcab95f8c562ac4eee43c is 50, key is test_row_0/A:col10/1732101712954/Put/seqid=0 2024-11-20T11:21:54,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:54,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101774084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:54,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:54,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101774084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:54,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742384_1560 (size=12454) 2024-11-20T11:21:54,128 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:54,128 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T11:21:54,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:54,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:54,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:54,128 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:54,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:54,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:54,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:54,187 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:54,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101774185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:54,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101774185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:54,188 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:54,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101774187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:54,189 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:54,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101774187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:54,280 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:54,281 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T11:21:54,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:54,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:54,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:54,281 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:54,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:54,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:54,390 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:54,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101774388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:54,390 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:54,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101774388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:54,391 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:54,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101774389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:54,392 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:54,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101774390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:54,433 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:54,434 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T11:21:54,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:54,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:54,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:54,434 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:54,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:54,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:54,477 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/6b9258a9c7ca40148db3dbb285f0b7a6 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/6b9258a9c7ca40148db3dbb285f0b7a6 2024-11-20T11:21:54,482 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2c782dcb9cddcab95f8c562ac4eee43c/C of 2c782dcb9cddcab95f8c562ac4eee43c into 6b9258a9c7ca40148db3dbb285f0b7a6(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:21:54,482 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:54,482 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c., storeName=2c782dcb9cddcab95f8c562ac4eee43c/C, priority=13, startTime=1732101714004; duration=0sec 2024-11-20T11:21:54,482 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:54,482 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c782dcb9cddcab95f8c562ac4eee43c:C 2024-11-20T11:21:54,489 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:54,491 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112045e43f9c73434544bfa8037fdf2ada69_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112045e43f9c73434544bfa8037fdf2ada69_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:54,492 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/305e3b6a71b849fea83e3219dda479de, store: [table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:54,493 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/305e3b6a71b849fea83e3219dda479de is 175, key is test_row_0/A:col10/1732101712954/Put/seqid=0 2024-11-20T11:21:54,496 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742385_1561 (size=31255) 2024-11-20T11:21:54,496 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=279, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/305e3b6a71b849fea83e3219dda479de 2024-11-20T11:21:54,502 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/a9436abd6b4940a7b89b16e5feec965a is 50, key is test_row_0/B:col10/1732101712954/Put/seqid=0 2024-11-20T11:21:54,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742386_1562 (size=12301) 2024-11-20T11:21:54,586 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:54,586 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T11:21:54,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:54,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:54,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:54,586 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:54,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:54,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:54,692 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:54,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101774692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:54,693 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:54,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101774692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:54,693 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:54,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101774692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:54,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:54,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101774693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:54,739 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:54,740 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T11:21:54,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:54,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:54,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:54,740 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:54,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:54,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:54,891 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:54,891 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T11:21:54,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:54,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:54,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:54,892 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:54,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:54,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:54,905 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=279 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/a9436abd6b4940a7b89b16e5feec965a 2024-11-20T11:21:54,912 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/fa38cc24b2fa4f2182170b10b07e08bf is 50, key is test_row_0/C:col10/1732101712954/Put/seqid=0 2024-11-20T11:21:54,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742387_1563 (size=12301) 2024-11-20T11:21:55,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-20T11:21:55,043 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:55,044 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T11:21:55,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:55,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:55,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:55,045 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:55,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:55,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:55,197 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:55,198 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:55,198 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T11:21:55,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101775196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:55,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:55,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:55,198 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T11:21:55,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.
2024-11-20T11:21:55,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101775197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666
2024-11-20T11:21:55,198 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178
java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T11:21:55,198 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T11:21:55,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178
java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T11:21:55,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101775197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666
2024-11-20T11:21:55,198 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T11:21:55,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101775197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666
2024-11-20T11:21:55,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=178
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
	at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
	at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
	at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T11:21:55,317 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=279 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/fa38cc24b2fa4f2182170b10b07e08bf 2024-11-20T11:21:55,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/305e3b6a71b849fea83e3219dda479de as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/305e3b6a71b849fea83e3219dda479de 2024-11-20T11:21:55,326 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/305e3b6a71b849fea83e3219dda479de, entries=150, sequenceid=279, filesize=30.5 K 2024-11-20T11:21:55,327 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/a9436abd6b4940a7b89b16e5feec965a as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/a9436abd6b4940a7b89b16e5feec965a 2024-11-20T11:21:55,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,331 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/a9436abd6b4940a7b89b16e5feec965a, entries=150, sequenceid=279, filesize=12.0 K 2024-11-20T11:21:55,332 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/fa38cc24b2fa4f2182170b10b07e08bf as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/fa38cc24b2fa4f2182170b10b07e08bf 2024-11-20T11:21:55,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,335 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/fa38cc24b2fa4f2182170b10b07e08bf, entries=150, sequenceid=279, filesize=12.0 K 
2024-11-20T11:21:55,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,336 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 2c782dcb9cddcab95f8c562ac4eee43c in 1258ms, sequenceid=279, compaction requested=false 2024-11-20T11:21:55,336 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:55,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,350 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:55,351 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T11:21:55,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
2024-11-20T11:21:55,351 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2837): Flushing 2c782dcb9cddcab95f8c562ac4eee43c 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-20T11:21:55,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=A 2024-11-20T11:21:55,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:55,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=B 2024-11-20T11:21:55,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:55,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=C 2024-11-20T11:21:55,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:55,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c23046c8311642c3997143b997ba59dc_2c782dcb9cddcab95f8c562ac4eee43c is 50, key is test_row_0/A:col10/1732101714082/Put/seqid=0 2024-11-20T11:21:55,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T11:21:55,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T11:21:55,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742388_1564 (size=9914) 2024-11-20T11:21:55,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:55,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:55,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
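[Editor's note] The long runs of storefiletracker.StoreFileTrackerFactory(122) entries above show each RPC handler resolving the store file tracker for the table and settling on DefaultStoreFileTracker. As a hedged illustration (not taken from this test), the implementation is normally selected through configuration; the sketch below assumes the configuration key "hbase.store.file-tracker.impl" used by recent HBase releases, and "DEFAULT" is simply the value matching the DefaultStoreFileTracker seen throughout the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hedged sketch: choosing the StoreFileTracker implementation via configuration.
// The key name is an assumption from recent HBase versions, not read from this test.
public class StoreFileTrackerConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.store.file-tracker.impl", "DEFAULT"); // tracker impl the factory instantiates
    System.out.println("tracker impl = " + conf.get("hbase.store.file-tracker.impl"));
  }
}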
2024-11-20T11:21:55,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
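[Editor's note] The "Flush requested on 2c782dcb9cddcab95f8c562ac4eee43c" and "NOT flushing ... as already flushing" entries above reflect memstore pressure: a flush is queued once a region's memstore passes the flush size, and writes are rejected with RegionTooBusyException once it also exceeds the flush size times the blocking multiplier. The sketch below is illustrative only; the test's actual settings are not visible in this log, and the numbers are assumptions chosen merely to reproduce the 512.0 K limit the server reports in the following entries.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hedged sketch of the two settings that determine when a region blocks writes.
// Values below are illustrative assumptions, not the test's configuration.
public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // per-region flush threshold
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // blocking multiplier
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
    System.out.println("writes block above " + blockingLimit + " bytes (512 K here)");
  }
}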
2024-11-20T11:21:55,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:55,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101775737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:55,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:55,808 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c23046c8311642c3997143b997ba59dc_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c23046c8311642c3997143b997ba59dc_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:55,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/5f033f936354488581df5548df53cfd4, store: [table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:55,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/5f033f936354488581df5548df53cfd4 is 175, key is test_row_0/A:col10/1732101714082/Put/seqid=0 2024-11-20T11:21:55,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742389_1565 (size=22561) 2024-11-20T11:21:55,827 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=289, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/5f033f936354488581df5548df53cfd4 2024-11-20T11:21:55,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/514c162baf2940e0b2be9f94d807fbfd is 50, key is test_row_0/B:col10/1732101714082/Put/seqid=0 2024-11-20T11:21:55,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742390_1566 (size=9857) 2024-11-20T11:21:55,843 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:55,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101775842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:56,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:56,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101776045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:56,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:56,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101776199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:56,202 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:56,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101776200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:56,202 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:56,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101776201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:56,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:56,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101776204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:56,242 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/514c162baf2940e0b2be9f94d807fbfd 2024-11-20T11:21:56,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/db439c3535364b1ebfbaa6b5c175846e is 50, key is test_row_0/C:col10/1732101714082/Put/seqid=0 2024-11-20T11:21:56,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742391_1567 (size=9857) 2024-11-20T11:21:56,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:56,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101776347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:56,655 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/db439c3535364b1ebfbaa6b5c175846e 2024-11-20T11:21:56,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/5f033f936354488581df5548df53cfd4 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/5f033f936354488581df5548df53cfd4 2024-11-20T11:21:56,663 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/5f033f936354488581df5548df53cfd4, entries=100, sequenceid=289, filesize=22.0 K 2024-11-20T11:21:56,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/514c162baf2940e0b2be9f94d807fbfd as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/514c162baf2940e0b2be9f94d807fbfd 2024-11-20T11:21:56,667 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/514c162baf2940e0b2be9f94d807fbfd, entries=100, sequenceid=289, filesize=9.6 K 2024-11-20T11:21:56,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 
{event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/db439c3535364b1ebfbaa6b5c175846e as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/db439c3535364b1ebfbaa6b5c175846e 2024-11-20T11:21:56,670 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/db439c3535364b1ebfbaa6b5c175846e, entries=100, sequenceid=289, filesize=9.6 K 2024-11-20T11:21:56,671 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 2c782dcb9cddcab95f8c562ac4eee43c in 1320ms, sequenceid=289, compaction requested=true 2024-11-20T11:21:56,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2538): Flush status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:56,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:56,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-11-20T11:21:56,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=178 2024-11-20T11:21:56,673 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=177 2024-11-20T11:21:56,673 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.7730 sec 2024-11-20T11:21:56,675 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees in 3.7770 sec 2024-11-20T11:21:56,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(8581): Flush requested on 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:56,855 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2c782dcb9cddcab95f8c562ac4eee43c 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-20T11:21:56,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=A 2024-11-20T11:21:56,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:56,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=B 2024-11-20T11:21:56,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:56,856 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=C 2024-11-20T11:21:56,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:56,861 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d95e94125a844658ad4a3565a8240c6c_2c782dcb9cddcab95f8c562ac4eee43c is 50, key is test_row_0/A:col10/1732101716854/Put/seqid=0 2024-11-20T11:21:56,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742392_1568 (size=12454) 2024-11-20T11:21:56,869 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:56,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101776868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:56,972 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:56,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101776970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:57,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-20T11:21:57,003 INFO [Thread-2175 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 177 completed 2024-11-20T11:21:57,004 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T11:21:57,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees 2024-11-20T11:21:57,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-20T11:21:57,006 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=179, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T11:21:57,006 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=179, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T11:21:57,006 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=180, ppid=179, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T11:21:57,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-20T11:21:57,158 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:57,158 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-20T11:21:57,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:57,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:57,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:57,159 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:57,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:57,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:57,175 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:57,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101777174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:57,253 DEBUG [Thread-2182 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06556601 to 127.0.0.1:62733 2024-11-20T11:21:57,253 DEBUG [Thread-2182 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:21:57,253 DEBUG [Thread-2184 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x458a85fd to 127.0.0.1:62733 2024-11-20T11:21:57,253 DEBUG [Thread-2184 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:21:57,254 DEBUG [Thread-2178 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53305d9b to 127.0.0.1:62733 2024-11-20T11:21:57,254 DEBUG [Thread-2178 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:21:57,255 DEBUG [Thread-2180 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6bb6288a to 127.0.0.1:62733 2024-11-20T11:21:57,255 DEBUG [Thread-2180 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:21:57,255 DEBUG [Thread-2176 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5d836f78 to 127.0.0.1:62733 2024-11-20T11:21:57,255 DEBUG [Thread-2176 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:21:57,265 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:57,268 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d95e94125a844658ad4a3565a8240c6c_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d95e94125a844658ad4a3565a8240c6c_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:57,268 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/a05c69d9551543c48e2bfdb40ec8dcce, store: [table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:57,269 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/a05c69d9551543c48e2bfdb40ec8dcce is 175, key is test_row_0/A:col10/1732101716854/Put/seqid=0 2024-11-20T11:21:57,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742393_1569 (size=31255) 2024-11-20T11:21:57,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-20T11:21:57,311 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:57,311 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-20T11:21:57,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:57,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:57,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:57,311 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:57,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:57,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
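The pid=180 failures above are the master re-dispatching FlushRegionProcedure while the region server keeps reporting the region as "already flushing"; the flush requests themselves originate from the test's admin client (the "Client=jenkins ... flush TestAcidGuarantees" entries). A minimal sketch of issuing such a table flush through the HBase client API, assuming a standard Connection setup — the class name and configuration here are illustrative, not taken from the log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Admin.flush submits a flush-table procedure on the master (visible in the
      // log as "Stored pid=..., state=RUNNABLE:FLUSH_TABLE_PREPARE") and returns
      // once the procedure completes, as in the "Operation: FLUSH ... completed" entry.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```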
2024-11-20T11:21:57,463 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:57,464 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-20T11:21:57,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:57,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:57,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:57,464 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:57,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:57,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:57,477 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:57,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101777477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:57,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-20T11:21:57,616 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:57,616 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-20T11:21:57,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:57,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:57,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:57,616 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:57,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:57,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:57,673 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=317, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/a05c69d9551543c48e2bfdb40ec8dcce 2024-11-20T11:21:57,678 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/fc135b381b0148ec8d5e23294b704344 is 50, key is test_row_0/B:col10/1732101716854/Put/seqid=0 2024-11-20T11:21:57,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742394_1570 (size=12301) 2024-11-20T11:21:57,768 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:57,769 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-20T11:21:57,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:57,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:57,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:57,769 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:57,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:57,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:57,921 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:57,921 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-20T11:21:57,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:57,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:57,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:57,921 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:57,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:57,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:57,982 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:57,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59602 deadline: 1732101777982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:58,073 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:58,074 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-20T11:21:58,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:58,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:58,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:58,074 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:58,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:58,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:58,081 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/fc135b381b0148ec8d5e23294b704344 2024-11-20T11:21:58,086 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/e895a08410fc440297ec59e3d8092140 is 50, key is test_row_0/C:col10/1732101716854/Put/seqid=0 2024-11-20T11:21:58,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742395_1571 (size=12301) 2024-11-20T11:21:58,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-20T11:21:58,205 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:58,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59638 deadline: 1732101778205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:58,205 DEBUG [Thread-2167 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4121 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c., hostname=ee8338ed7cc0,35185,1732101546666, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T11:21:58,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:58,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59616 deadline: 1732101778215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:58,215 DEBUG [Thread-2173 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4134 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c., hostname=ee8338ed7cc0,35185,1732101546666, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T11:21:58,216 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:58,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59594 deadline: 1732101778216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:58,217 DEBUG [Thread-2165 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4132 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c., hostname=ee8338ed7cc0,35185,1732101546666, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T11:21:58,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T11:21:58,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59582 deadline: 1732101778220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:58,220 DEBUG [Thread-2171 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4139 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c., hostname=ee8338ed7cc0,35185,1732101546666, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T11:21:58,226 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:58,226 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-20T11:21:58,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:58,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:58,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:58,226 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:58,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:58,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:58,378 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:58,379 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-20T11:21:58,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:58,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. as already flushing 2024-11-20T11:21:58,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:58,379 ERROR [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T11:21:58,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:58,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T11:21:58,489 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/e895a08410fc440297ec59e3d8092140 2024-11-20T11:21:58,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/a05c69d9551543c48e2bfdb40ec8dcce as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/a05c69d9551543c48e2bfdb40ec8dcce 2024-11-20T11:21:58,495 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/a05c69d9551543c48e2bfdb40ec8dcce, entries=150, sequenceid=317, filesize=30.5 K 2024-11-20T11:21:58,496 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/fc135b381b0148ec8d5e23294b704344 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/fc135b381b0148ec8d5e23294b704344 2024-11-20T11:21:58,498 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/fc135b381b0148ec8d5e23294b704344, entries=150, sequenceid=317, filesize=12.0 K 2024-11-20T11:21:58,498 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/e895a08410fc440297ec59e3d8092140 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/e895a08410fc440297ec59e3d8092140 2024-11-20T11:21:58,501 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/e895a08410fc440297ec59e3d8092140, entries=150, sequenceid=317, filesize=12.0 K 2024-11-20T11:21:58,501 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 2c782dcb9cddcab95f8c562ac4eee43c in 1646ms, sequenceid=317, compaction requested=true 2024-11-20T11:21:58,501 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:58,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c782dcb9cddcab95f8c562ac4eee43c:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T11:21:58,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:58,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c782dcb9cddcab95f8c562ac4eee43c:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T11:21:58,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:58,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2c782dcb9cddcab95f8c562ac4eee43c:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T11:21:58,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:58,502 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T11:21:58,502 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T11:21:58,502 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 116790 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T11:21:58,502 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47224 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T11:21:58,502 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 2c782dcb9cddcab95f8c562ac4eee43c/B is initiating minor compaction (all files) 2024-11-20T11:21:58,502 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1540): 2c782dcb9cddcab95f8c562ac4eee43c/A is initiating minor compaction (all files) 2024-11-20T11:21:58,503 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c782dcb9cddcab95f8c562ac4eee43c/A in TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:58,503 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c782dcb9cddcab95f8c562ac4eee43c/B in TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
2024-11-20T11:21:58,503 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/c38e18b15923413cac710906987a034e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/a9436abd6b4940a7b89b16e5feec965a, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/514c162baf2940e0b2be9f94d807fbfd, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/fc135b381b0148ec8d5e23294b704344] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp, totalSize=46.1 K 2024-11-20T11:21:58,503 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/c7506490641b44c299fe934404a8134c, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/305e3b6a71b849fea83e3219dda479de, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/5f033f936354488581df5548df53cfd4, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/a05c69d9551543c48e2bfdb40ec8dcce] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp, totalSize=114.1 K 2024-11-20T11:21:58,503 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:21:58,503 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
files: [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/c7506490641b44c299fe934404a8134c, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/305e3b6a71b849fea83e3219dda479de, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/5f033f936354488581df5548df53cfd4, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/a05c69d9551543c48e2bfdb40ec8dcce] 2024-11-20T11:21:58,503 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting c38e18b15923413cac710906987a034e, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732101712907 2024-11-20T11:21:58,503 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting c7506490641b44c299fe934404a8134c, keycount=150, bloomtype=ROW, size=31.0 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732101712907 2024-11-20T11:21:58,503 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 305e3b6a71b849fea83e3219dda479de, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1732101712951 2024-11-20T11:21:58,503 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting a9436abd6b4940a7b89b16e5feec965a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1732101712951 2024-11-20T11:21:58,503 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 514c162baf2940e0b2be9f94d807fbfd, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732101714082 2024-11-20T11:21:58,504 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5f033f936354488581df5548df53cfd4, keycount=100, bloomtype=ROW, size=22.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732101714082 2024-11-20T11:21:58,504 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting fc135b381b0148ec8d5e23294b704344, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732101715728 2024-11-20T11:21:58,504 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] compactions.Compactor(224): Compacting a05c69d9551543c48e2bfdb40ec8dcce, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732101715728 2024-11-20T11:21:58,513 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c782dcb9cddcab95f8c562ac4eee43c#B#compaction#477 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:58,513 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/0b1e8d4838b542db9c0f0b6c37ecfb1b is 50, key is test_row_0/B:col10/1732101716854/Put/seqid=0 2024-11-20T11:21:58,514 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:58,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742396_1572 (size=13051) 2024-11-20T11:21:58,524 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112034acf604476d4fbc9468d9419348ae24_2c782dcb9cddcab95f8c562ac4eee43c store=[table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:58,530 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:21:58,531 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35185 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-20T11:21:58,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
2024-11-20T11:21:58,531 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2837): Flushing 2c782dcb9cddcab95f8c562ac4eee43c 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T11:21:58,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=A 2024-11-20T11:21:58,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:58,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=B 2024-11-20T11:21:58,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:58,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=C 2024-11-20T11:21:58,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:21:58,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120117af492be0d455496efabb7f68ddcec_2c782dcb9cddcab95f8c562ac4eee43c is 50, key is test_row_0/A:col10/1732101716861/Put/seqid=0 2024-11-20T11:21:58,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742397_1573 (size=12454) 2024-11-20T11:21:58,554 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112034acf604476d4fbc9468d9419348ae24_2c782dcb9cddcab95f8c562ac4eee43c, store=[table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:58,554 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112034acf604476d4fbc9468d9419348ae24_2c782dcb9cddcab95f8c562ac4eee43c because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:58,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742398_1574 (size=4469) 2024-11-20T11:21:58,558 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c782dcb9cddcab95f8c562ac4eee43c#A#compaction#478 average throughput is 0.56 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:58,559 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/bdfa158ae15f4c9989de72bba4b3d90e is 175, key is test_row_0/A:col10/1732101716854/Put/seqid=0 2024-11-20T11:21:58,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742399_1575 (size=32005) 2024-11-20T11:21:58,921 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/0b1e8d4838b542db9c0f0b6c37ecfb1b as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/0b1e8d4838b542db9c0f0b6c37ecfb1b 2024-11-20T11:21:58,924 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2c782dcb9cddcab95f8c562ac4eee43c/B of 2c782dcb9cddcab95f8c562ac4eee43c into 0b1e8d4838b542db9c0f0b6c37ecfb1b(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T11:21:58,924 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:58,924 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c., storeName=2c782dcb9cddcab95f8c562ac4eee43c/B, priority=12, startTime=1732101718502; duration=0sec 2024-11-20T11:21:58,924 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T11:21:58,924 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c782dcb9cddcab95f8c562ac4eee43c:B 2024-11-20T11:21:58,925 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T11:21:58,925 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47224 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T11:21:58,925 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1540): 2c782dcb9cddcab95f8c562ac4eee43c/C is initiating minor compaction (all files) 2024-11-20T11:21:58,925 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2c782dcb9cddcab95f8c562ac4eee43c/C in TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
2024-11-20T11:21:58,925 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/6b9258a9c7ca40148db3dbb285f0b7a6, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/fa38cc24b2fa4f2182170b10b07e08bf, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/db439c3535364b1ebfbaa6b5c175846e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/e895a08410fc440297ec59e3d8092140] into tmpdir=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp, totalSize=46.1 K 2024-11-20T11:21:58,926 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b9258a9c7ca40148db3dbb285f0b7a6, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732101712907 2024-11-20T11:21:58,926 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting fa38cc24b2fa4f2182170b10b07e08bf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1732101712951 2024-11-20T11:21:58,926 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting db439c3535364b1ebfbaa6b5c175846e, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732101714082 2024-11-20T11:21:58,926 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] compactions.Compactor(224): Compacting e895a08410fc440297ec59e3d8092140, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732101715728 2024-11-20T11:21:58,932 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2c782dcb9cddcab95f8c562ac4eee43c#C#compaction#480 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T11:21:58,933 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/0dc4c8b4a8014ff999a02e23402b050d is 50, key is test_row_0/C:col10/1732101716854/Put/seqid=0 2024-11-20T11:21:58,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742400_1576 (size=13051) 2024-11-20T11:21:58,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:21:58,952 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120117af492be0d455496efabb7f68ddcec_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120117af492be0d455496efabb7f68ddcec_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:21:58,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/f3e378e09ace43b185eee70d740b0455, store: [table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:21:58,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/f3e378e09ace43b185eee70d740b0455 is 175, key is test_row_0/A:col10/1732101716861/Put/seqid=0 2024-11-20T11:21:58,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742401_1577 (size=31255) 2024-11-20T11:21:58,965 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/bdfa158ae15f4c9989de72bba4b3d90e as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/bdfa158ae15f4c9989de72bba4b3d90e 2024-11-20T11:21:58,968 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2c782dcb9cddcab95f8c562ac4eee43c/A of 2c782dcb9cddcab95f8c562ac4eee43c into bdfa158ae15f4c9989de72bba4b3d90e(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:21:58,968 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:58,968 INFO [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c., storeName=2c782dcb9cddcab95f8c562ac4eee43c/A, priority=12, startTime=1732101718502; duration=0sec 2024-11-20T11:21:58,968 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:58,968 DEBUG [RS:0;ee8338ed7cc0:35185-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c782dcb9cddcab95f8c562ac4eee43c:A 2024-11-20T11:21:58,993 DEBUG [Thread-2169 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17327621 to 127.0.0.1:62733 2024-11-20T11:21:58,993 DEBUG [Thread-2169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:21:59,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-20T11:21:59,339 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/0dc4c8b4a8014ff999a02e23402b050d as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/0dc4c8b4a8014ff999a02e23402b050d 2024-11-20T11:21:59,342 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2c782dcb9cddcab95f8c562ac4eee43c/C of 2c782dcb9cddcab95f8c562ac4eee43c into 0dc4c8b4a8014ff999a02e23402b050d(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T11:21:59,342 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:21:59,342 INFO [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c., storeName=2c782dcb9cddcab95f8c562ac4eee43c/C, priority=12, startTime=1732101718502; duration=0sec 2024-11-20T11:21:59,342 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T11:21:59,342 DEBUG [RS:0;ee8338ed7cc0:35185-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2c782dcb9cddcab95f8c562ac4eee43c:C 2024-11-20T11:21:59,356 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=325, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/f3e378e09ace43b185eee70d740b0455 2024-11-20T11:21:59,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/7caed97a42eb45edb65e2de1142b3506 is 50, key is test_row_0/B:col10/1732101716861/Put/seqid=0 2024-11-20T11:21:59,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742402_1578 (size=12301) 2024-11-20T11:21:59,765 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/7caed97a42eb45edb65e2de1142b3506 2024-11-20T11:21:59,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/85cb9f26ceb44c4096a2101cd88db724 is 50, key is test_row_0/C:col10/1732101716861/Put/seqid=0 2024-11-20T11:21:59,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742403_1579 (size=12301) 2024-11-20T11:22:00,174 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/85cb9f26ceb44c4096a2101cd88db724 2024-11-20T11:22:00,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/f3e378e09ace43b185eee70d740b0455 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/f3e378e09ace43b185eee70d740b0455 2024-11-20T11:22:00,179 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/f3e378e09ace43b185eee70d740b0455, entries=150, sequenceid=325, filesize=30.5 K 2024-11-20T11:22:00,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/7caed97a42eb45edb65e2de1142b3506 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/7caed97a42eb45edb65e2de1142b3506 2024-11-20T11:22:00,182 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/7caed97a42eb45edb65e2de1142b3506, entries=150, sequenceid=325, filesize=12.0 K 2024-11-20T11:22:00,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/85cb9f26ceb44c4096a2101cd88db724 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/85cb9f26ceb44c4096a2101cd88db724 2024-11-20T11:22:00,184 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/85cb9f26ceb44c4096a2101cd88db724, entries=150, sequenceid=325, filesize=12.0 K 2024-11-20T11:22:00,185 INFO [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=6.71 KB/6870 for 2c782dcb9cddcab95f8c562ac4eee43c in 1654ms, sequenceid=325, compaction requested=false 2024-11-20T11:22:00,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2538): Flush status journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:22:00,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
2024-11-20T11:22:00,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ee8338ed7cc0:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=180 2024-11-20T11:22:00,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster(4106): Remote procedure done, pid=180 2024-11-20T11:22:00,187 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=180, resume processing ppid=179 2024-11-20T11:22:00,187 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, ppid=179, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.1800 sec 2024-11-20T11:22:00,188 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees in 3.1830 sec 2024-11-20T11:22:01,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-20T11:22:01,110 INFO [Thread-2175 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 179 completed 2024-11-20T11:22:02,230 DEBUG [Thread-2171 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1584f18a to 127.0.0.1:62733 2024-11-20T11:22:02,230 DEBUG [Thread-2171 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:22:02,231 DEBUG [Thread-2165 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d5efb7a to 127.0.0.1:62733 2024-11-20T11:22:02,231 DEBUG [Thread-2165 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:22:02,239 DEBUG [Thread-2167 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7fc332d8 to 127.0.0.1:62733 2024-11-20T11:22:02,239 DEBUG [Thread-2167 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:22:02,247 DEBUG [Thread-2173 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5b914bf4 to 127.0.0.1:62733 2024-11-20T11:22:02,247 DEBUG [Thread-2173 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:22:02,247 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-20T11:22:02,247 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 53 2024-11-20T11:22:02,247 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 51 2024-11-20T11:22:02,247 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 61 2024-11-20T11:22:02,247 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 47 2024-11-20T11:22:02,247 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 45 2024-11-20T11:22:02,247 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T11:22:02,247 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6997 2024-11-20T11:22:02,247 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6861 2024-11-20T11:22:02,247 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6897 2024-11-20T11:22:02,247 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7015 2024-11-20T11:22:02,247 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6861 2024-11-20T11:22:02,247 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T11:22:02,247 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T11:22:02,247 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6d9954b7 to 127.0.0.1:62733 2024-11-20T11:22:02,248 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:22:02,248 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T11:22:02,248 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T11:22:02,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=181, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T11:22:02,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-20T11:22:02,250 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732101722250"}]},"ts":"1732101722250"} 2024-11-20T11:22:02,251 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T11:22:02,253 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T11:22:02,254 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=182, ppid=181, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T11:22:02,254 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=183, ppid=182, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2c782dcb9cddcab95f8c562ac4eee43c, UNASSIGN}] 2024-11-20T11:22:02,255 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=183, ppid=182, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2c782dcb9cddcab95f8c562ac4eee43c, UNASSIGN 2024-11-20T11:22:02,255 INFO [PEWorker-4 
{}] assignment.RegionStateStore(202): pid=183 updating hbase:meta row=2c782dcb9cddcab95f8c562ac4eee43c, regionState=CLOSING, regionLocation=ee8338ed7cc0,35185,1732101546666 2024-11-20T11:22:02,256 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T11:22:02,256 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=184, ppid=183, state=RUNNABLE; CloseRegionProcedure 2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666}] 2024-11-20T11:22:02,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-20T11:22:02,407 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ee8338ed7cc0,35185,1732101546666 2024-11-20T11:22:02,407 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] handler.UnassignRegionHandler(124): Close 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:22:02,408 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T11:22:02,408 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(1681): Closing 2c782dcb9cddcab95f8c562ac4eee43c, disabling compactions & flushes 2024-11-20T11:22:02,408 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:22:02,408 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 2024-11-20T11:22:02,408 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. after waiting 0 ms 2024-11-20T11:22:02,408 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
2024-11-20T11:22:02,408 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(2837): Flushing 2c782dcb9cddcab95f8c562ac4eee43c 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T11:22:02,408 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=A 2024-11-20T11:22:02,408 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:22:02,408 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=B 2024-11-20T11:22:02,408 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:22:02,408 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2c782dcb9cddcab95f8c562ac4eee43c, store=C 2024-11-20T11:22:02,408 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T11:22:02,413 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d89153c4d12049cb83ad2b4011d6a3c3_2c782dcb9cddcab95f8c562ac4eee43c is 50, key is test_row_0/A:col10/1732101722238/Put/seqid=0 2024-11-20T11:22:02,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742404_1580 (size=9914) 2024-11-20T11:22:02,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-20T11:22:02,817 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T11:22:02,819 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d89153c4d12049cb83ad2b4011d6a3c3_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d89153c4d12049cb83ad2b4011d6a3c3_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:22:02,820 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/eeb6ac10ce154486b6b5ea0b42a9ad06, store: [table=TestAcidGuarantees family=A region=2c782dcb9cddcab95f8c562ac4eee43c] 2024-11-20T11:22:02,821 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/eeb6ac10ce154486b6b5ea0b42a9ad06 is 175, key is test_row_0/A:col10/1732101722238/Put/seqid=0 2024-11-20T11:22:02,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742405_1581 (size=22561) 2024-11-20T11:22:02,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-20T11:22:03,224 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=336, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/eeb6ac10ce154486b6b5ea0b42a9ad06 2024-11-20T11:22:03,230 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/e44342bbf30f4cd3a2dde12f7a290ef8 is 50, key is test_row_0/B:col10/1732101722238/Put/seqid=0 2024-11-20T11:22:03,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742406_1582 (size=9857) 2024-11-20T11:22:03,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-20T11:22:03,633 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/e44342bbf30f4cd3a2dde12f7a290ef8 2024-11-20T11:22:03,638 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/45de1041622a492c843416caf9133444 is 50, key is test_row_0/C:col10/1732101722238/Put/seqid=0 2024-11-20T11:22:03,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742407_1583 (size=9857) 2024-11-20T11:22:04,042 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=336 (bloomFilter=true), 
to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/45de1041622a492c843416caf9133444 2024-11-20T11:22:04,045 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/A/eeb6ac10ce154486b6b5ea0b42a9ad06 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/eeb6ac10ce154486b6b5ea0b42a9ad06 2024-11-20T11:22:04,047 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/eeb6ac10ce154486b6b5ea0b42a9ad06, entries=100, sequenceid=336, filesize=22.0 K 2024-11-20T11:22:04,048 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/B/e44342bbf30f4cd3a2dde12f7a290ef8 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/e44342bbf30f4cd3a2dde12f7a290ef8 2024-11-20T11:22:04,050 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/e44342bbf30f4cd3a2dde12f7a290ef8, entries=100, sequenceid=336, filesize=9.6 K 2024-11-20T11:22:04,051 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/.tmp/C/45de1041622a492c843416caf9133444 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/45de1041622a492c843416caf9133444 2024-11-20T11:22:04,053 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/45de1041622a492c843416caf9133444, entries=100, sequenceid=336, filesize=9.6 K 2024-11-20T11:22:04,054 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 2c782dcb9cddcab95f8c562ac4eee43c in 1646ms, sequenceid=336, compaction requested=true 2024-11-20T11:22:04,054 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/d177dffdcbce4dcea5265763c81ec67d, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/7f5aa82f77894bc8b6c7a048861a123a, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/a50d616f9ce04e1e9d3c1f03aebc41d3, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/783f7a5671c84de3999f583d7db9e667, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/d14bee3fb2bc41bab5f6e975ce661402, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/1b609dcbacf14d30805fa924b8321a65, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/83b5bbb9df76455098a5add69e2bc7ad, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/8b0ae284ca314fe38c282e6c74daa730, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/3307a9df84c74ac583486648e2f4306a, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/b0450ed2ecf9468184a499f4e1ba9cd1, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/c0406244946741a5ac6b91abd9e38ec0, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/1aa1c0ed28c148d1b309f2d25be760a4, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/0924388a14f441bb899c154e9b184189, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/77d70493447b433d8bb6699c5ab82f87, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/b1e254b116364a619b57beab440d5b9e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/1e05e2003662454e8583c57314fbc306, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/596fb69be4174aa989a76bac13d86175, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/d699ff2928c549d7a6ac6fe6e97b0f4e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/c7506490641b44c299fe934404a8134c, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/305e3b6a71b849fea83e3219dda479de, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/5f033f936354488581df5548df53cfd4, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/a05c69d9551543c48e2bfdb40ec8dcce] to archive 2024-11-20T11:22:04,055 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T11:22:04,056 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/d177dffdcbce4dcea5265763c81ec67d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/d177dffdcbce4dcea5265763c81ec67d 2024-11-20T11:22:04,057 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/7f5aa82f77894bc8b6c7a048861a123a to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/7f5aa82f77894bc8b6c7a048861a123a 2024-11-20T11:22:04,058 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/a50d616f9ce04e1e9d3c1f03aebc41d3 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/a50d616f9ce04e1e9d3c1f03aebc41d3 2024-11-20T11:22:04,058 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/783f7a5671c84de3999f583d7db9e667 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/783f7a5671c84de3999f583d7db9e667 2024-11-20T11:22:04,059 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/d14bee3fb2bc41bab5f6e975ce661402 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/d14bee3fb2bc41bab5f6e975ce661402 2024-11-20T11:22:04,060 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/1b609dcbacf14d30805fa924b8321a65 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/1b609dcbacf14d30805fa924b8321a65 2024-11-20T11:22:04,061 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/83b5bbb9df76455098a5add69e2bc7ad to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/83b5bbb9df76455098a5add69e2bc7ad 2024-11-20T11:22:04,062 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/8b0ae284ca314fe38c282e6c74daa730 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/8b0ae284ca314fe38c282e6c74daa730 2024-11-20T11:22:04,063 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/3307a9df84c74ac583486648e2f4306a to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/3307a9df84c74ac583486648e2f4306a 2024-11-20T11:22:04,063 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/b0450ed2ecf9468184a499f4e1ba9cd1 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/b0450ed2ecf9468184a499f4e1ba9cd1 2024-11-20T11:22:04,064 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/c0406244946741a5ac6b91abd9e38ec0 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/c0406244946741a5ac6b91abd9e38ec0 2024-11-20T11:22:04,065 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/1aa1c0ed28c148d1b309f2d25be760a4 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/1aa1c0ed28c148d1b309f2d25be760a4 2024-11-20T11:22:04,066 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/0924388a14f441bb899c154e9b184189 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/0924388a14f441bb899c154e9b184189 2024-11-20T11:22:04,066 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/77d70493447b433d8bb6699c5ab82f87 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/77d70493447b433d8bb6699c5ab82f87 2024-11-20T11:22:04,067 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/b1e254b116364a619b57beab440d5b9e to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/b1e254b116364a619b57beab440d5b9e 2024-11-20T11:22:04,068 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/1e05e2003662454e8583c57314fbc306 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/1e05e2003662454e8583c57314fbc306 2024-11-20T11:22:04,068 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/596fb69be4174aa989a76bac13d86175 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/596fb69be4174aa989a76bac13d86175 2024-11-20T11:22:04,069 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/d699ff2928c549d7a6ac6fe6e97b0f4e to 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/d699ff2928c549d7a6ac6fe6e97b0f4e 2024-11-20T11:22:04,070 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/c7506490641b44c299fe934404a8134c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/c7506490641b44c299fe934404a8134c 2024-11-20T11:22:04,070 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/305e3b6a71b849fea83e3219dda479de to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/305e3b6a71b849fea83e3219dda479de 2024-11-20T11:22:04,071 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/5f033f936354488581df5548df53cfd4 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/5f033f936354488581df5548df53cfd4 2024-11-20T11:22:04,072 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/a05c69d9551543c48e2bfdb40ec8dcce to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/a05c69d9551543c48e2bfdb40ec8dcce 2024-11-20T11:22:04,073 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/669c44d92ee74cc1aaa8c2409441b1c6, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/8e234df5e5494253ad694f95bfc1d3b5, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/ced9de796ade4769bdd77067413bf73e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/ca4890464c004a189076f8f409fbbc03, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/cc1e6299b70a4a18bd7ad3249ea549c4, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/ae559198f3b34c6d892b0159fd1d2232, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/df82aab3592741f7a91e35cece687232, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/17723e1a018545fcb77afa709a9ad0b3, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/3fbf94b9e3084871aab1e059c46877a3, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/bc0a20663d494d57957ee69e5c1cae56, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/5aa421e6f5c64e1e83faeb11d9c03a73, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/0b92bc25f9c64ecba0509b1ce5536c1b, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/682ec78c85e94d16893fb36d9a6c6a06, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/fc830ece556448b99632ab56b9a4e161, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/1fdbe5a4a71e414f97d8483849527e96, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/1d57a224d53b481fbfb101fe1b68c477, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/1028f87098bc4442a31b1734dfa460ef, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/c38e18b15923413cac710906987a034e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/aa60acc3bfc84136aaa8bc34ee972d47, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/a9436abd6b4940a7b89b16e5feec965a, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/514c162baf2940e0b2be9f94d807fbfd, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/fc135b381b0148ec8d5e23294b704344] to archive 2024-11-20T11:22:04,073 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T11:22:04,074 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/669c44d92ee74cc1aaa8c2409441b1c6 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/669c44d92ee74cc1aaa8c2409441b1c6 2024-11-20T11:22:04,075 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/8e234df5e5494253ad694f95bfc1d3b5 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/8e234df5e5494253ad694f95bfc1d3b5 2024-11-20T11:22:04,076 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/ced9de796ade4769bdd77067413bf73e to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/ced9de796ade4769bdd77067413bf73e 2024-11-20T11:22:04,077 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/ca4890464c004a189076f8f409fbbc03 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/ca4890464c004a189076f8f409fbbc03 2024-11-20T11:22:04,077 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/cc1e6299b70a4a18bd7ad3249ea549c4 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/cc1e6299b70a4a18bd7ad3249ea549c4 2024-11-20T11:22:04,078 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/ae559198f3b34c6d892b0159fd1d2232 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/ae559198f3b34c6d892b0159fd1d2232 2024-11-20T11:22:04,079 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/df82aab3592741f7a91e35cece687232 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/df82aab3592741f7a91e35cece687232 2024-11-20T11:22:04,079 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/17723e1a018545fcb77afa709a9ad0b3 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/17723e1a018545fcb77afa709a9ad0b3 2024-11-20T11:22:04,080 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/3fbf94b9e3084871aab1e059c46877a3 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/3fbf94b9e3084871aab1e059c46877a3 2024-11-20T11:22:04,081 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/bc0a20663d494d57957ee69e5c1cae56 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/bc0a20663d494d57957ee69e5c1cae56 2024-11-20T11:22:04,082 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/5aa421e6f5c64e1e83faeb11d9c03a73 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/5aa421e6f5c64e1e83faeb11d9c03a73 2024-11-20T11:22:04,082 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/0b92bc25f9c64ecba0509b1ce5536c1b to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/0b92bc25f9c64ecba0509b1ce5536c1b 2024-11-20T11:22:04,083 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/682ec78c85e94d16893fb36d9a6c6a06 to 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/682ec78c85e94d16893fb36d9a6c6a06 2024-11-20T11:22:04,084 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/fc830ece556448b99632ab56b9a4e161 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/fc830ece556448b99632ab56b9a4e161 2024-11-20T11:22:04,085 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/1fdbe5a4a71e414f97d8483849527e96 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/1fdbe5a4a71e414f97d8483849527e96 2024-11-20T11:22:04,086 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/1d57a224d53b481fbfb101fe1b68c477 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/1d57a224d53b481fbfb101fe1b68c477 2024-11-20T11:22:04,086 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/1028f87098bc4442a31b1734dfa460ef to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/1028f87098bc4442a31b1734dfa460ef 2024-11-20T11:22:04,087 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/c38e18b15923413cac710906987a034e to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/c38e18b15923413cac710906987a034e 2024-11-20T11:22:04,088 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/aa60acc3bfc84136aaa8bc34ee972d47 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/aa60acc3bfc84136aaa8bc34ee972d47 2024-11-20T11:22:04,089 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/a9436abd6b4940a7b89b16e5feec965a to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/a9436abd6b4940a7b89b16e5feec965a 2024-11-20T11:22:04,089 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/514c162baf2940e0b2be9f94d807fbfd to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/514c162baf2940e0b2be9f94d807fbfd 2024-11-20T11:22:04,090 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/fc135b381b0148ec8d5e23294b704344 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/fc135b381b0148ec8d5e23294b704344 2024-11-20T11:22:04,091 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/d40935d20da44ebb8ab4303d47d3ea2a, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/bcbf4686a7e94ed0a8f61ab61451aa90, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/3bd90095352240c1b031f237564ee5c9, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/67b6706c38a04fce8f330d2e08430fc3, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/6b72eaa0017e4a519745cff19518a691, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/2780df3f37124fa6856ef4966462091b, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/1f14a77624474c7d901e34f441cb4338, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/deb00a200b274f4cb82a37fe86c30dab, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/b149ce8445c642d0810d9ed2366d6d62, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/0b9366762bef406986f17c971691ea45, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/d661693c006449f29275b40d3e270aa6, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/051c3cd9a7f34e2296f22d40aa90ed89, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/f50437251bad4d6fab69da4ec7d6a666, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/3ecfe82a7c9b452ab4ff987f873e15a2, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/ea726b822597415082cbdde2cde0cd9e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/89d2b3f36e5243c9badb580dae7f8a0a, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/0aa3eebbeb794e63ba81cec349bf8674, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/6b9258a9c7ca40148db3dbb285f0b7a6, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/b6bbbd83c6844ac1ba062e21318c68bc, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/fa38cc24b2fa4f2182170b10b07e08bf, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/db439c3535364b1ebfbaa6b5c175846e, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/e895a08410fc440297ec59e3d8092140] to archive 2024-11-20T11:22:04,092 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T11:22:04,093 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/d40935d20da44ebb8ab4303d47d3ea2a to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/d40935d20da44ebb8ab4303d47d3ea2a 2024-11-20T11:22:04,093 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/bcbf4686a7e94ed0a8f61ab61451aa90 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/bcbf4686a7e94ed0a8f61ab61451aa90 2024-11-20T11:22:04,094 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/3bd90095352240c1b031f237564ee5c9 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/3bd90095352240c1b031f237564ee5c9 2024-11-20T11:22:04,095 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/67b6706c38a04fce8f330d2e08430fc3 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/67b6706c38a04fce8f330d2e08430fc3 2024-11-20T11:22:04,096 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/6b72eaa0017e4a519745cff19518a691 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/6b72eaa0017e4a519745cff19518a691 2024-11-20T11:22:04,096 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/2780df3f37124fa6856ef4966462091b to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/2780df3f37124fa6856ef4966462091b 2024-11-20T11:22:04,097 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/1f14a77624474c7d901e34f441cb4338 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/1f14a77624474c7d901e34f441cb4338 2024-11-20T11:22:04,098 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/deb00a200b274f4cb82a37fe86c30dab to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/deb00a200b274f4cb82a37fe86c30dab 2024-11-20T11:22:04,099 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/b149ce8445c642d0810d9ed2366d6d62 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/b149ce8445c642d0810d9ed2366d6d62 2024-11-20T11:22:04,099 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/0b9366762bef406986f17c971691ea45 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/0b9366762bef406986f17c971691ea45 2024-11-20T11:22:04,102 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/d661693c006449f29275b40d3e270aa6 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/d661693c006449f29275b40d3e270aa6 2024-11-20T11:22:04,103 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/051c3cd9a7f34e2296f22d40aa90ed89 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/051c3cd9a7f34e2296f22d40aa90ed89 2024-11-20T11:22:04,103 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/f50437251bad4d6fab69da4ec7d6a666 to 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/f50437251bad4d6fab69da4ec7d6a666 2024-11-20T11:22:04,104 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/3ecfe82a7c9b452ab4ff987f873e15a2 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/3ecfe82a7c9b452ab4ff987f873e15a2 2024-11-20T11:22:04,105 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/ea726b822597415082cbdde2cde0cd9e to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/ea726b822597415082cbdde2cde0cd9e 2024-11-20T11:22:04,106 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/89d2b3f36e5243c9badb580dae7f8a0a to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/89d2b3f36e5243c9badb580dae7f8a0a 2024-11-20T11:22:04,107 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/0aa3eebbeb794e63ba81cec349bf8674 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/0aa3eebbeb794e63ba81cec349bf8674 2024-11-20T11:22:04,108 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/6b9258a9c7ca40148db3dbb285f0b7a6 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/6b9258a9c7ca40148db3dbb285f0b7a6 2024-11-20T11:22:04,109 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/b6bbbd83c6844ac1ba062e21318c68bc to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/b6bbbd83c6844ac1ba062e21318c68bc 2024-11-20T11:22:04,110 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/fa38cc24b2fa4f2182170b10b07e08bf to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/fa38cc24b2fa4f2182170b10b07e08bf 2024-11-20T11:22:04,110 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/db439c3535364b1ebfbaa6b5c175846e to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/db439c3535364b1ebfbaa6b5c175846e 2024-11-20T11:22:04,111 DEBUG [StoreCloser-TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/e895a08410fc440297ec59e3d8092140 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/e895a08410fc440297ec59e3d8092140 2024-11-20T11:22:04,115 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/recovered.edits/339.seqid, newMaxSeqId=339, maxSeqId=4 2024-11-20T11:22:04,115 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c. 
2024-11-20T11:22:04,115 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(1635): Region close journal for 2c782dcb9cddcab95f8c562ac4eee43c: 2024-11-20T11:22:04,116 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] handler.UnassignRegionHandler(170): Closed 2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:22:04,117 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=183 updating hbase:meta row=2c782dcb9cddcab95f8c562ac4eee43c, regionState=CLOSED 2024-11-20T11:22:04,118 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=184, resume processing ppid=183 2024-11-20T11:22:04,118 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=184, ppid=183, state=SUCCESS; CloseRegionProcedure 2c782dcb9cddcab95f8c562ac4eee43c, server=ee8338ed7cc0,35185,1732101546666 in 1.8610 sec 2024-11-20T11:22:04,119 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=183, resume processing ppid=182 2024-11-20T11:22:04,119 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, ppid=182, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=2c782dcb9cddcab95f8c562ac4eee43c, UNASSIGN in 1.8640 sec 2024-11-20T11:22:04,120 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=182, resume processing ppid=181 2024-11-20T11:22:04,120 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=181, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8650 sec 2024-11-20T11:22:04,121 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732101724121"}]},"ts":"1732101724121"} 2024-11-20T11:22:04,122 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T11:22:04,124 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T11:22:04,125 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8770 sec 2024-11-20T11:22:04,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-20T11:22:04,354 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 181 completed 2024-11-20T11:22:04,354 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T11:22:04,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] procedure2.ProcedureExecutor(1098): Stored pid=185, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:22:04,355 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=185, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:22:04,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-11-20T11:22:04,356 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=185, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:22:04,358 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:22:04,359 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A, FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B, FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C, FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/recovered.edits] 2024-11-20T11:22:04,361 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/bdfa158ae15f4c9989de72bba4b3d90e to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/bdfa158ae15f4c9989de72bba4b3d90e 2024-11-20T11:22:04,362 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/eeb6ac10ce154486b6b5ea0b42a9ad06 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/eeb6ac10ce154486b6b5ea0b42a9ad06 2024-11-20T11:22:04,363 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/f3e378e09ace43b185eee70d740b0455 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/A/f3e378e09ace43b185eee70d740b0455 2024-11-20T11:22:04,364 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/0b1e8d4838b542db9c0f0b6c37ecfb1b to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/0b1e8d4838b542db9c0f0b6c37ecfb1b 2024-11-20T11:22:04,365 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/7caed97a42eb45edb65e2de1142b3506 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/7caed97a42eb45edb65e2de1142b3506 
2024-11-20T11:22:04,366 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/e44342bbf30f4cd3a2dde12f7a290ef8 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/e44342bbf30f4cd3a2dde12f7a290ef8 2024-11-20T11:22:04,367 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/0dc4c8b4a8014ff999a02e23402b050d to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/0dc4c8b4a8014ff999a02e23402b050d 2024-11-20T11:22:04,368 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/45de1041622a492c843416caf9133444 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/45de1041622a492c843416caf9133444 2024-11-20T11:22:04,369 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/85cb9f26ceb44c4096a2101cd88db724 to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/C/85cb9f26ceb44c4096a2101cd88db724 2024-11-20T11:22:04,371 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/recovered.edits/339.seqid to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/recovered.edits/339.seqid 2024-11-20T11:22:04,371 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:22:04,371 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T11:22:04,372 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T11:22:04,372 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-20T11:22:04,374 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120117af492be0d455496efabb7f68ddcec_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120117af492be0d455496efabb7f68ddcec_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:22:04,375 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112033bd7b781b7f4fdca8ab5b1292fc6935_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112033bd7b781b7f4fdca8ab5b1292fc6935_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:22:04,376 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120436855d4d5ec4007932a892a6fe7063e_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120436855d4d5ec4007932a892a6fe7063e_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:22:04,377 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112045e43f9c73434544bfa8037fdf2ada69_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112045e43f9c73434544bfa8037fdf2ada69_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:22:04,378 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204625e8976a77409c9db8c353bad82c18_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204625e8976a77409c9db8c353bad82c18_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:22:04,379 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205ee386c457014cbeb563abfb6634a485_2c782dcb9cddcab95f8c562ac4eee43c to 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205ee386c457014cbeb563abfb6634a485_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:22:04,380 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206a3417395c294824b634e22fca758409_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206a3417395c294824b634e22fca758409_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:22:04,381 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120833ddc9cbfdc43a2bd554c1717b3d82b_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120833ddc9cbfdc43a2bd554c1717b3d82b_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:22:04,382 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120936ac1273f5747308cdcb595bf4a78d9_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120936ac1273f5747308cdcb595bf4a78d9_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:22:04,382 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c23046c8311642c3997143b997ba59dc_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c23046c8311642c3997143b997ba59dc_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:22:04,383 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c2a778c6f5614e389dc19d6697b7c957_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c2a778c6f5614e389dc19d6697b7c957_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:22:04,384 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c2d68b26ef2d432db5dcecaecd4f5d4a_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c2d68b26ef2d432db5dcecaecd4f5d4a_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:22:04,385 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d17ca5d07f3543f6b7443234c857fc7c_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d17ca5d07f3543f6b7443234c857fc7c_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:22:04,386 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d33f239fd67e4a27930d35a6bf8e759b_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d33f239fd67e4a27930d35a6bf8e759b_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:22:04,387 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d89153c4d12049cb83ad2b4011d6a3c3_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d89153c4d12049cb83ad2b4011d6a3c3_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:22:04,387 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d95e94125a844658ad4a3565a8240c6c_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d95e94125a844658ad4a3565a8240c6c_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:22:04,388 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120dd7c545e423549beb86294922d4cf57c_2c782dcb9cddcab95f8c562ac4eee43c to 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120dd7c545e423549beb86294922d4cf57c_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:22:04,389 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120eb2025115fc04653b53f2d4c29ad4797_2c782dcb9cddcab95f8c562ac4eee43c to hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120eb2025115fc04653b53f2d4c29ad4797_2c782dcb9cddcab95f8c562ac4eee43c 2024-11-20T11:22:04,390 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T11:22:04,391 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=185, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:22:04,394 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T11:22:04,395 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T11:22:04,396 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=185, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:22:04,396 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T11:22:04,396 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732101724396"}]},"ts":"9223372036854775807"} 2024-11-20T11:22:04,398 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T11:22:04,398 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 2c782dcb9cddcab95f8c562ac4eee43c, NAME => 'TestAcidGuarantees,,1732101694189.2c782dcb9cddcab95f8c562ac4eee43c.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T11:22:04,398 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
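All of the HFileArchiver(596) entries above apply the same path transformation: a store file under <rootDir>/data/<namespace>/<table>/... is moved to the identical relative location under <rootDir>/archive/data/<namespace>/<table>/.... The following is a minimal, illustrative Java sketch of that mapping only (it is not the HFileArchiver implementation); the root directory and the example file are copied from the log entries above purely for illustration.

import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {
  // Map a store file under <rootDir>/data/... to the mirrored <rootDir>/archive/data/... location.
  static Path toArchivePath(Path rootDir, Path storeFile) {
    String root = rootDir.toUri().getPath();               // e.g. /user/jenkins/test-data/45d008b6-...
    String file = storeFile.toUri().getPath();              // e.g. /user/jenkins/.../data/default/...
    String relative = file.substring(root.length() + 1);    // data/default/<table>/<region>/<cf>/<hfile>
    return new Path(new Path(rootDir, "archive"), relative);
  }

  public static void main(String[] args) {
    Path root = new Path("hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830");
    Path hfile = new Path(root,
        "data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/e44342bbf30f4cd3a2dde12f7a290ef8");
    // Prints the archive location the log reports for this file:
    // .../archive/data/default/TestAcidGuarantees/2c782dcb9cddcab95f8c562ac4eee43c/B/e44342bbf30f4cd3a2dde12f7a290ef8
    System.out.println(toArchivePath(root, hfile));
  }
}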
2024-11-20T11:22:04,398 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732101724398"}]},"ts":"9223372036854775807"} 2024-11-20T11:22:04,399 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T11:22:04,401 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=185, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T11:22:04,402 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=185, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 48 msec 2024-11-20T11:22:04,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36055 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-11-20T11:22:04,456 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 185 completed 2024-11-20T11:22:04,465 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=239 (was 241), OpenFileDescriptor=453 (was 453), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=295 (was 268) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=5306 (was 5759) 2024-11-20T11:22:04,465 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-11-20T11:22:04,465 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T11:22:04,466 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7e541e88 to 127.0.0.1:62733 2024-11-20T11:22:04,466 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:22:04,466 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T11:22:04,466 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=669332021, stopped=false 2024-11-20T11:22:04,466 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=ee8338ed7cc0,36055,1732101545920 2024-11-20T11:22:04,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35185-0x10014a7d58e0001, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T11:22:04,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T11:22:04,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35185-0x10014a7d58e0001, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T11:22:04,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T11:22:04,468 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-11-20T11:22:04,469 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:22:04,469 DEBUG 
[zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35185-0x10014a7d58e0001, quorum=127.0.0.1:62733, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T11:22:04,469 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T11:22:04,469 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'ee8338ed7cc0,35185,1732101546666' ***** 2024-11-20T11:22:04,469 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-20T11:22:04,469 INFO [RS:0;ee8338ed7cc0:35185 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T11:22:04,470 INFO [RS:0;ee8338ed7cc0:35185 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T11:22:04,470 INFO [RS:0;ee8338ed7cc0:35185 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-20T11:22:04,470 INFO [RS:0;ee8338ed7cc0:35185 {}] regionserver.HRegionServer(3579): Received CLOSE for 2931c42a5e0431c7e1d9a63f9b78ad4e 2024-11-20T11:22:04,470 INFO [RS:0;ee8338ed7cc0:35185 {}] regionserver.HRegionServer(1224): stopping server ee8338ed7cc0,35185,1732101546666 2024-11-20T11:22:04,470 DEBUG [RS:0;ee8338ed7cc0:35185 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:22:04,470 INFO [RS:0;ee8338ed7cc0:35185 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T11:22:04,470 INFO [RS:0;ee8338ed7cc0:35185 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T11:22:04,470 INFO [RS:0;ee8338ed7cc0:35185 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-20T11:22:04,470 INFO [RS:0;ee8338ed7cc0:35185 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-11-20T11:22:04,470 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 2931c42a5e0431c7e1d9a63f9b78ad4e, disabling compactions & flushes 2024-11-20T11:22:04,470 INFO [RS:0;ee8338ed7cc0:35185 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-11-20T11:22:04,470 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732101550221.2931c42a5e0431c7e1d9a63f9b78ad4e. 2024-11-20T11:22:04,470 DEBUG [RS:0;ee8338ed7cc0:35185 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, 2931c42a5e0431c7e1d9a63f9b78ad4e=hbase:namespace,,1732101550221.2931c42a5e0431c7e1d9a63f9b78ad4e.} 2024-11-20T11:22:04,470 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-20T11:22:04,471 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732101550221.2931c42a5e0431c7e1d9a63f9b78ad4e. 2024-11-20T11:22:04,471 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732101550221.2931c42a5e0431c7e1d9a63f9b78ad4e. 
after waiting 0 ms 2024-11-20T11:22:04,471 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732101550221.2931c42a5e0431c7e1d9a63f9b78ad4e. 2024-11-20T11:22:04,471 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 2931c42a5e0431c7e1d9a63f9b78ad4e 1/1 column families, dataSize=78 B heapSize=488 B 2024-11-20T11:22:04,471 DEBUG [RS_CLOSE_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-20T11:22:04,471 INFO [RS_CLOSE_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-20T11:22:04,471 DEBUG [RS_CLOSE_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-20T11:22:04,471 DEBUG [RS_CLOSE_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T11:22:04,471 DEBUG [RS_CLOSE_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T11:22:04,471 INFO [RS_CLOSE_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-11-20T11:22:04,471 DEBUG [RS:0;ee8338ed7cc0:35185 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 2931c42a5e0431c7e1d9a63f9b78ad4e 2024-11-20T11:22:04,493 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/namespace/2931c42a5e0431c7e1d9a63f9b78ad4e/.tmp/info/52f64b36d89d494682d84e22b9676973 is 45, key is default/info:d/1732101551571/Put/seqid=0 2024-11-20T11:22:04,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742408_1584 (size=5037) 2024-11-20T11:22:04,497 DEBUG [RS_CLOSE_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/meta/1588230740/.tmp/info/626e11e8284f493a8bd43ca54436df8e is 143, key is hbase:namespace,,1732101550221.2931c42a5e0431c7e1d9a63f9b78ad4e./info:regioninfo/1732101551461/Put/seqid=0 2024-11-20T11:22:04,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742409_1585 (size=7725) 2024-11-20T11:22:04,541 INFO [regionserver/ee8338ed7cc0:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T11:22:04,671 DEBUG [RS:0;ee8338ed7cc0:35185 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 2931c42a5e0431c7e1d9a63f9b78ad4e 2024-11-20T11:22:04,871 DEBUG [RS:0;ee8338ed7cc0:35185 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 2931c42a5e0431c7e1d9a63f9b78ad4e 2024-11-20T11:22:04,897 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data 
size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/namespace/2931c42a5e0431c7e1d9a63f9b78ad4e/.tmp/info/52f64b36d89d494682d84e22b9676973 2024-11-20T11:22:04,900 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/namespace/2931c42a5e0431c7e1d9a63f9b78ad4e/.tmp/info/52f64b36d89d494682d84e22b9676973 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/namespace/2931c42a5e0431c7e1d9a63f9b78ad4e/info/52f64b36d89d494682d84e22b9676973 2024-11-20T11:22:04,901 INFO [RS_CLOSE_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/meta/1588230740/.tmp/info/626e11e8284f493a8bd43ca54436df8e 2024-11-20T11:22:04,903 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/namespace/2931c42a5e0431c7e1d9a63f9b78ad4e/info/52f64b36d89d494682d84e22b9676973, entries=2, sequenceid=6, filesize=4.9 K 2024-11-20T11:22:04,903 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 2931c42a5e0431c7e1d9a63f9b78ad4e in 432ms, sequenceid=6, compaction requested=false 2024-11-20T11:22:04,907 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/namespace/2931c42a5e0431c7e1d9a63f9b78ad4e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-20T11:22:04,907 INFO [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1732101550221.2931c42a5e0431c7e1d9a63f9b78ad4e. 2024-11-20T11:22:04,907 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 2931c42a5e0431c7e1d9a63f9b78ad4e: 2024-11-20T11:22:04,907 DEBUG [RS_CLOSE_REGION-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1732101550221.2931c42a5e0431c7e1d9a63f9b78ad4e. 
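The flush sequence logged above writes the new HFile fully under the region's .tmp directory and only then commits it by renaming it into the column-family directory (the HRegionFileSystem(442) "Committing ... as ..." entries). The sketch below shows that write-then-rename commit pattern using the Hadoop FileSystem API directly rather than HBase's internals; the short paths are placeholders standing in for the full .tmp/info and info paths shown in the log.

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitByRenameSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder paths; the real ones are the region's .tmp/info and info locations above.
    Path tmpFile   = new Path("/region/.tmp/info/52f64b36d89d494682d84e22b9676973");
    Path storeFile = new Path("/region/info/52f64b36d89d494682d84e22b9676973");

    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:43109"), new Configuration());

    // The flushed file is written completely under .tmp first, so readers never see a partial file.
    // The commit itself is a single rename within the HDFS namespace.
    if (!fs.rename(tmpFile, storeFile)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + storeFile);
    }
  }
}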
2024-11-20T11:22:04,919 DEBUG [RS_CLOSE_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/meta/1588230740/.tmp/rep_barrier/83f9c44e7cbe4af4a337d626d5a44152 is 102, key is TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75./rep_barrier:/1732101576555/DeleteFamily/seqid=0 2024-11-20T11:22:04,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742410_1586 (size=6025) 2024-11-20T11:22:04,930 INFO [regionserver/ee8338ed7cc0:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-20T11:22:04,930 INFO [regionserver/ee8338ed7cc0:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-20T11:22:05,056 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T11:22:05,072 DEBUG [RS:0;ee8338ed7cc0:35185 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-20T11:22:05,272 DEBUG [RS:0;ee8338ed7cc0:35185 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-20T11:22:05,322 INFO [RS_CLOSE_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/meta/1588230740/.tmp/rep_barrier/83f9c44e7cbe4af4a337d626d5a44152 2024-11-20T11:22:05,340 DEBUG [RS_CLOSE_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/meta/1588230740/.tmp/table/ae35a55a10654bab9efb6d43630dbc09 is 96, key is TestAcidGuarantees,,1732101551799.96d866d8db5bf8a73bb64ed0351e8f75./table:/1732101576555/DeleteFamily/seqid=0 2024-11-20T11:22:05,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742411_1587 (size=5942) 2024-11-20T11:22:05,472 INFO [RS:0;ee8338ed7cc0:35185 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-11-20T11:22:05,472 DEBUG [RS:0;ee8338ed7cc0:35185 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-20T11:22:05,472 DEBUG [RS:0;ee8338ed7cc0:35185 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-20T11:22:05,672 DEBUG [RS:0;ee8338ed7cc0:35185 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-20T11:22:05,744 INFO [RS_CLOSE_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/meta/1588230740/.tmp/table/ae35a55a10654bab9efb6d43630dbc09 2024-11-20T11:22:05,747 DEBUG [RS_CLOSE_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/meta/1588230740/.tmp/info/626e11e8284f493a8bd43ca54436df8e as 
hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/meta/1588230740/info/626e11e8284f493a8bd43ca54436df8e 2024-11-20T11:22:05,750 INFO [RS_CLOSE_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/meta/1588230740/info/626e11e8284f493a8bd43ca54436df8e, entries=22, sequenceid=93, filesize=7.5 K 2024-11-20T11:22:05,750 DEBUG [RS_CLOSE_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/meta/1588230740/.tmp/rep_barrier/83f9c44e7cbe4af4a337d626d5a44152 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/meta/1588230740/rep_barrier/83f9c44e7cbe4af4a337d626d5a44152 2024-11-20T11:22:05,753 INFO [RS_CLOSE_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/meta/1588230740/rep_barrier/83f9c44e7cbe4af4a337d626d5a44152, entries=6, sequenceid=93, filesize=5.9 K 2024-11-20T11:22:05,753 DEBUG [RS_CLOSE_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/meta/1588230740/.tmp/table/ae35a55a10654bab9efb6d43630dbc09 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/meta/1588230740/table/ae35a55a10654bab9efb6d43630dbc09 2024-11-20T11:22:05,756 INFO [RS_CLOSE_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/meta/1588230740/table/ae35a55a10654bab9efb6d43630dbc09, entries=9, sequenceid=93, filesize=5.8 K 2024-11-20T11:22:05,756 INFO [RS_CLOSE_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1285ms, sequenceid=93, compaction requested=false 2024-11-20T11:22:05,759 DEBUG [RS_CLOSE_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-11-20T11:22:05,760 DEBUG [RS_CLOSE_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T11:22:05,760 INFO [RS_CLOSE_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-20T11:22:05,760 DEBUG [RS_CLOSE_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-20T11:22:05,760 DEBUG [RS_CLOSE_META-regionserver/ee8338ed7cc0:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T11:22:05,873 INFO [RS:0;ee8338ed7cc0:35185 {}] regionserver.HRegionServer(1250): stopping server 
ee8338ed7cc0,35185,1732101546666; all regions closed. 2024-11-20T11:22:05,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741834_1010 (size=26050) 2024-11-20T11:22:05,878 DEBUG [RS:0;ee8338ed7cc0:35185 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/oldWALs 2024-11-20T11:22:05,878 INFO [RS:0;ee8338ed7cc0:35185 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL ee8338ed7cc0%2C35185%2C1732101546666.meta:.meta(num 1732101549974) 2024-11-20T11:22:05,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741832_1008 (size=13240531) 2024-11-20T11:22:05,882 DEBUG [RS:0;ee8338ed7cc0:35185 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/oldWALs 2024-11-20T11:22:05,882 INFO [RS:0;ee8338ed7cc0:35185 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL ee8338ed7cc0%2C35185%2C1732101546666:(num 1732101549035) 2024-11-20T11:22:05,882 DEBUG [RS:0;ee8338ed7cc0:35185 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:22:05,882 INFO [RS:0;ee8338ed7cc0:35185 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T11:22:05,882 INFO [RS:0;ee8338ed7cc0:35185 {}] hbase.ChoreService(370): Chore service for: regionserver/ee8338ed7cc0:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-20T11:22:05,883 INFO [regionserver/ee8338ed7cc0:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-20T11:22:05,883 INFO [RS:0;ee8338ed7cc0:35185 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:35185 2024-11-20T11:22:05,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35185-0x10014a7d58e0001, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ee8338ed7cc0,35185,1732101546666 2024-11-20T11:22:05,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T11:22:05,888 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ee8338ed7cc0,35185,1732101546666] 2024-11-20T11:22:05,888 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing ee8338ed7cc0,35185,1732101546666; numProcessing=1 2024-11-20T11:22:05,889 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/ee8338ed7cc0,35185,1732101546666 already deleted, retry=false 2024-11-20T11:22:05,889 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; ee8338ed7cc0,35185,1732101546666 expired; onlineServers=0 2024-11-20T11:22:05,889 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'ee8338ed7cc0,36055,1732101545920' ***** 2024-11-20T11:22:05,889 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T11:22:05,889 DEBUG [M:0;ee8338ed7cc0:36055 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40ecf8f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ee8338ed7cc0/172.17.0.2:0 2024-11-20T11:22:05,889 INFO [M:0;ee8338ed7cc0:36055 {}] regionserver.HRegionServer(1224): stopping server ee8338ed7cc0,36055,1732101545920 2024-11-20T11:22:05,889 INFO [M:0;ee8338ed7cc0:36055 {}] regionserver.HRegionServer(1250): stopping server ee8338ed7cc0,36055,1732101545920; all regions closed. 2024-11-20T11:22:05,889 DEBUG [M:0;ee8338ed7cc0:36055 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T11:22:05,889 DEBUG [M:0;ee8338ed7cc0:36055 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T11:22:05,889 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-20T11:22:05,890 DEBUG [M:0;ee8338ed7cc0:36055 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T11:22:05,890 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster-HFileCleaner.small.0-1732101548732 {}] cleaner.HFileCleaner(306): Exit Thread[master/ee8338ed7cc0:0:becomeActiveMaster-HFileCleaner.small.0-1732101548732,5,FailOnTimeoutGroup] 2024-11-20T11:22:05,890 DEBUG [master/ee8338ed7cc0:0:becomeActiveMaster-HFileCleaner.large.0-1732101548732 {}] cleaner.HFileCleaner(306): Exit Thread[master/ee8338ed7cc0:0:becomeActiveMaster-HFileCleaner.large.0-1732101548732,5,FailOnTimeoutGroup] 2024-11-20T11:22:05,890 INFO [M:0;ee8338ed7cc0:36055 {}] hbase.ChoreService(370): Chore service for: master/ee8338ed7cc0:0 had [] on shutdown 2024-11-20T11:22:05,890 DEBUG [M:0;ee8338ed7cc0:36055 {}] master.HMaster(1733): Stopping service threads 2024-11-20T11:22:05,890 INFO [M:0;ee8338ed7cc0:36055 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T11:22:05,890 ERROR [M:0;ee8338ed7cc0:36055 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[IPC Client (59733779) connection to localhost/127.0.0.1:43109 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:43109,5,PEWorkerGroup] Thread[HFileArchiver-6,5,PEWorkerGroup] 2024-11-20T11:22:05,891 INFO [M:0;ee8338ed7cc0:36055 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T11:22:05,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T11:22:05,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T11:22:05,891 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-20T11:22:05,891 DEBUG [M:0;ee8338ed7cc0:36055 {}] zookeeper.ZKUtil(347): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T11:22:05,891 WARN [M:0;ee8338ed7cc0:36055 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T11:22:05,891 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T11:22:05,891 INFO [M:0;ee8338ed7cc0:36055 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-11-20T11:22:05,891 INFO [M:0;ee8338ed7cc0:36055 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T11:22:05,891 DEBUG [M:0;ee8338ed7cc0:36055 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T11:22:05,891 INFO [M:0;ee8338ed7cc0:36055 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T11:22:05,891 DEBUG [M:0;ee8338ed7cc0:36055 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T11:22:05,891 DEBUG [M:0;ee8338ed7cc0:36055 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T11:22:05,891 DEBUG [M:0;ee8338ed7cc0:36055 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T11:22:05,892 INFO [M:0;ee8338ed7cc0:36055 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=751.42 KB heapSize=922.25 KB 2024-11-20T11:22:05,907 DEBUG [M:0;ee8338ed7cc0:36055 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/898acb9df5a64d3f8ab479f9f0786870 is 82, key is hbase:meta,,1/info:regioninfo/1732101550114/Put/seqid=0 2024-11-20T11:22:05,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742412_1588 (size=5672) 2024-11-20T11:22:05,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35185-0x10014a7d58e0001, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T11:22:05,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35185-0x10014a7d58e0001, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T11:22:05,988 INFO [RS:0;ee8338ed7cc0:35185 {}] regionserver.HRegionServer(1307): Exiting; stopping=ee8338ed7cc0,35185,1732101546666; zookeeper connection closed. 
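The ZooKeeper traffic above reflects the liveness mechanism: the region server holds an ephemeral znode under /hbase/rs, the znode disappears when its session closes, and the master's RegionServerTracker reacts to the resulting NodeDeleted event by processing the expiration. Below is a generic ZooKeeper sketch of that pattern, not HBase's tracker; the znode path is made up, and only the 127.0.0.1:62733 quorum address comes from the log.

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralLivenessSketch {
  public static void main(String[] args) throws Exception {
    String path = "/example-rs-ephemeral";  // HBase uses /hbase/rs/<server>,<port>,<startcode>

    // "Server" session: the ephemeral node exists only as long as this session is alive.
    ZooKeeper server = new ZooKeeper("127.0.0.1:62733", 30_000, event -> { });
    server.create(path, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    // "Tracker" session: set a watch and treat NodeDeleted as server expiration.
    ZooKeeper tracker = new ZooKeeper("127.0.0.1:62733", 30_000, event -> { });
    tracker.exists(path, (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
        System.out.println("ephemeral node deleted, processing expiration: " + path);
      }
    });

    server.close();      // session ends, znode is removed, the tracker's watch fires
    Thread.sleep(1_000); // give the watch time to arrive before exiting
    tracker.close();
  }
}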
2024-11-20T11:22:05,988 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@10c04307 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@10c04307 2024-11-20T11:22:05,989 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-20T11:22:06,310 INFO [M:0;ee8338ed7cc0:36055 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2091 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/898acb9df5a64d3f8ab479f9f0786870 2024-11-20T11:22:06,335 DEBUG [M:0;ee8338ed7cc0:36055 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e841ae1522c84bee923f2b314382e2fc is 2278, key is \x00\x00\x00\x00\x00\x00\x00\xA0/proc:d/1732101697202/Put/seqid=0 2024-11-20T11:22:06,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742413_1589 (size=46064) 2024-11-20T11:22:06,416 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T11:22:06,416 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T11:22:06,417 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-11-20T11:22:06,417 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T11:22:06,739 INFO [M:0;ee8338ed7cc0:36055 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=750.87 KB at sequenceid=2091 (bloomFilter=true), to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e841ae1522c84bee923f2b314382e2fc 2024-11-20T11:22:06,742 INFO [M:0;ee8338ed7cc0:36055 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e841ae1522c84bee923f2b314382e2fc 2024-11-20T11:22:06,762 DEBUG [M:0;ee8338ed7cc0:36055 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3a51f66b848849b4be31df6399196380 is 69, key is ee8338ed7cc0,35185,1732101546666/rs:state/1732101548805/Put/seqid=0 2024-11-20T11:22:06,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073742414_1590 (size=5156) 2024-11-20T11:22:07,166 INFO [M:0;ee8338ed7cc0:36055 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2091 (bloomFilter=true), 
to=hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3a51f66b848849b4be31df6399196380 2024-11-20T11:22:07,170 DEBUG [M:0;ee8338ed7cc0:36055 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/898acb9df5a64d3f8ab479f9f0786870 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/898acb9df5a64d3f8ab479f9f0786870 2024-11-20T11:22:07,173 INFO [M:0;ee8338ed7cc0:36055 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/898acb9df5a64d3f8ab479f9f0786870, entries=8, sequenceid=2091, filesize=5.5 K 2024-11-20T11:22:07,174 DEBUG [M:0;ee8338ed7cc0:36055 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e841ae1522c84bee923f2b314382e2fc as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e841ae1522c84bee923f2b314382e2fc 2024-11-20T11:22:07,177 INFO [M:0;ee8338ed7cc0:36055 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e841ae1522c84bee923f2b314382e2fc 2024-11-20T11:22:07,177 INFO [M:0;ee8338ed7cc0:36055 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e841ae1522c84bee923f2b314382e2fc, entries=185, sequenceid=2091, filesize=45.0 K 2024-11-20T11:22:07,178 DEBUG [M:0;ee8338ed7cc0:36055 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3a51f66b848849b4be31df6399196380 as hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3a51f66b848849b4be31df6399196380 2024-11-20T11:22:07,181 INFO [M:0;ee8338ed7cc0:36055 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43109/user/jenkins/test-data/45d008b6-7080-14c9-6932-c6e066dcb830/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3a51f66b848849b4be31df6399196380, entries=1, sequenceid=2091, filesize=5.0 K 2024-11-20T11:22:07,182 INFO [M:0;ee8338ed7cc0:36055 {}] regionserver.HRegion(3040): Finished flush of dataSize ~751.42 KB/769459, heapSize ~921.95 KB/944080, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1290ms, sequenceid=2091, compaction requested=false 2024-11-20T11:22:07,189 INFO [M:0;ee8338ed7cc0:36055 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-20T11:22:07,189 DEBUG [M:0;ee8338ed7cc0:36055 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-20T11:22:07,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38643 is added to blk_1073741830_1006 (size=906637) 2024-11-20T11:22:07,192 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-20T11:22:07,192 INFO [M:0;ee8338ed7cc0:36055 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-11-20T11:22:07,192 INFO [M:0;ee8338ed7cc0:36055 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:36055 2024-11-20T11:22:07,194 DEBUG [M:0;ee8338ed7cc0:36055 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/ee8338ed7cc0,36055,1732101545920 already deleted, retry=false 2024-11-20T11:22:07,297 INFO [M:0;ee8338ed7cc0:36055 {}] regionserver.HRegionServer(1307): Exiting; stopping=ee8338ed7cc0,36055,1732101545920; zookeeper connection closed. 2024-11-20T11:22:07,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T11:22:07,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36055-0x10014a7d58e0000, quorum=127.0.0.1:62733, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T11:22:07,303 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f79ec76{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T11:22:07,306 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T11:22:07,306 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T11:22:07,306 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T11:22:07,306 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/hadoop.log.dir/,STOPPED} 2024-11-20T11:22:07,310 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T11:22:07,310 WARN [BP-348945642-172.17.0.2-1732101543140 heartbeating to localhost/127.0.0.1:43109 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T11:22:07,310 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T11:22:07,310 WARN [BP-348945642-172.17.0.2-1732101543140 heartbeating to localhost/127.0.0.1:43109 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-348945642-172.17.0.2-1732101543140 (Datanode Uuid 204d1068-de64-4920-8a56-72bfc6d41169) service to localhost/127.0.0.1:43109 2024-11-20T11:22:07,312 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/cluster_e3ab1e07-46f1-e4fd-8280-4ad2ac7fbc56/dfs/data/data1/current/BP-348945642-172.17.0.2-1732101543140 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T11:22:07,312 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/cluster_e3ab1e07-46f1-e4fd-8280-4ad2ac7fbc56/dfs/data/data2/current/BP-348945642-172.17.0.2-1732101543140 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T11:22:07,312 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T11:22:07,321 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T11:22:07,322 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T11:22:07,322 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T11:22:07,322 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T11:22:07,322 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f220ae79-e9a4-f869-e9f6-f3bff43894e0/hadoop.log.dir/,STOPPED} 2024-11-20T11:22:07,342 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-11-20T11:22:07,470 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
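For orientation, a run like the one ending here has roughly the shape of the JUnit skeleton below: start the minicluster, run the workload, drop the table, then shut the minicluster down again ("Shutting down minicluster" ... "Minicluster is down"). This is a hedged sketch, not the actual TestAcidGuaranteesWithBasicPolicy source; the HBaseTestingUtility, Admin, and JUnit calls are real APIs, while the class and method names are assumptions.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
  private static final TableName TABLE = TableName.valueOf("TestAcidGuarantees");

  @BeforeClass
  public static void setUp() throws Exception {
    UTIL.startMiniCluster();  // brings up ZooKeeper, HDFS, one master and one region server
  }

  @Test
  public void runWorkload() throws Exception {
    // ... exercise the table here ...
  }

  @AfterClass
  public static void tearDown() throws Exception {
    if (UTIL.getAdmin().tableExists(TABLE)) {
      UTIL.getAdmin().disableTable(TABLE);  // precedes the DeleteTableProcedure seen above
      UTIL.getAdmin().deleteTable(TABLE);
    }
    UTIL.shutdownMiniCluster();             // logs "Shutting down minicluster" ... "Minicluster is down"
  }
}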